+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..b083bc3
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,89 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:cd6d7020946fde61e471ae29009f27abb0382d0b525f624d60a71ba48d25a703"
+ name = "github.com/aws/aws-sdk-go-v2"
+ packages = [
+ "aws",
+ "aws/awserr",
+ "aws/defaults",
+ "aws/ec2metadata",
+ "aws/ec2rolecreds",
+ "aws/endpointcreds",
+ "aws/endpoints",
+ "aws/external",
+ "aws/signer/v4",
+ "aws/stscreds",
+ "internal/awsutil",
+ "internal/ini",
+ "internal/sdk",
+ "private/protocol",
+ "private/protocol/json/jsonutil",
+ "private/protocol/jsonrpc",
+ "private/protocol/query",
+ "private/protocol/query/queryutil",
+ "private/protocol/rest",
+ "private/protocol/xml/xmlutil",
+ "service/dynamodb",
+ "service/dynamodb/dynamodbattribute",
+ "service/sts",
+ ]
+ pruneopts = "UT"
+ revision = "d52522b5f4b95591ff6528d7c54923951aadf099"
+ version = "v2.0.0-preview.5"
+
+[[projects]]
+ digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1"
+ name = "github.com/gorilla/context"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
+ version = "v1.1.1"
+
+[[projects]]
+ digest = "1:664d37ea261f0fc73dd17f4a1f5f46d01fbb0b0d75f6375af064824424109b7d"
+ name = "github.com/gorilla/handlers"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "7e0847f9db758cdebd26c149d0ae9d5d0b9c98ce"
+ version = "v1.4.0"
+
+[[projects]]
+ digest = "1:e73f5b0152105f18bc131fba127d9949305c8693f8a762588a82a48f61756f5f"
+ name = "github.com/gorilla/mux"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
+ version = "v1.6.2"
+
+[[projects]]
+ digest = "1:e22af8c7518e1eab6f2eab2b7d7558927f816262586cd6ed9f349c97a6c285c4"
+ name = "github.com/jmespath/go-jmespath"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "0b12d6b5"
+
+[[projects]]
+ digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925"
+ name = "github.com/satori/go.uuid"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
+ version = "v1.2.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/aws/aws-sdk-go-v2/aws",
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints",
+ "github.com/aws/aws-sdk-go-v2/aws/external",
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb",
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute",
+ "github.com/gorilla/handlers",
+ "github.com/gorilla/mux",
+ "github.com/satori/go.uuid",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 0000000..ce1d42f
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,38 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+
+[prune]
+ go-tests = true
+ unused-packages = true
+
+[[constraint]]
+ name = "github.com/satori/go.uuid"
+ version = "1.2.0"
+
+[[constraint]]
+ name = "github.com/aws/aws-sdk-go-v2"
+ version = "2.0.0-preview.5"
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2161254
--- /dev/null
+++ b/README.md
@@ -0,0 +1,79 @@
+# Boldly Go REST
+
+Simple RESTful service to demonstrate the ease of setting up an HTTP service using Go.
+
+## Data Storage
+
+This service uses DynamoDB to persist data. Check out the [docs](https://aws.amazon.com/dynamodb/) for more information.
+
+### AWS Access
+
+To access your AWS DynamoDB tables, you will need an AWS account with an IAM user that has access to Read, Write DynamoDB tables.
+Once the IAM user is created, get the access key and secret and store them in environment variables:
+
+- `AWS_ACCESS_KEY`: the IAM user's access key id (the SDK's default credential chain also honors the standard `AWS_ACCESS_KEY_ID`)
+- `AWS_SECRET_KEY`: the IAM user's secret access key (the standard `AWS_SECRET_ACCESS_KEY` is also honored)
+
+## Dependency Management
+
+This service uses [go dep](https://github.com/golang/dep) for the dependency management tool. After pulling the code down,
+run `dep ensure`; this will install necessary dependencies to the project and get it ready for running.
+
+## Endpoints
+
+- Ping: Health Check for the service.
+ - endpoint: `GET /api/v1/ping`
+ - CURL Example
+ ```bash
+ curl -XGET http://localhost:5002/api/v1/ping
+ ```
+ - Example Response:
+ ```json
+ {
+ "version": "0.0.1",
+ "health": "HEALTHY",
+ "msg": "Looking good, beautiful"
+ }
+ ```
+- Get Bank: Get a User Bank Record
+ - endpoint: `GET /api/v1/user/{owningUserId}/bank/{bankId}`; where
+ - `{owningUserId}` is the id of the user that the bank belongs to &
+ - `{bankId}` is the unique id of the bank.
+ - CURL example
+ ```bash
+ curl -XGET http://localhost:5002/api/v1/user/4b7b2def-e76e-48bf-993b-8ec2b193b855/bank/01e173f4-02a2-4310-a7cc-e2b919f13aac
+ ```
+ - Example Response:
+ ```json
+ {
+ "owningUserId": "4b7b2def-e76e-48bf-993b-8ec2b193b855",
+ "bankId": "01e173f4-02a2-4310-a7cc-e2b919f13aac",
+ "bankName": "US Bank",
+ "accountNumber": "2112"
+ }
+ ```
+- Save Bank: Save a new User Bank record
+ - endpoint: `POST /api/v1/bank`
+ - Example Request Body
+ ```json
+ {
+ "owningUserId": "4b7b2def-e76e-48bf-993b-8ec2b193b855",
+ "bankName": "BANK NAME",
+ "accountNumber": "1234"
+ }
+ ```
+ - CURL example
+ ```bash
+ curl -X POST -H "Content-Type: application/json" \
+ -d '{"owningUserId": "4b7b2def-e76e-48bf-993b-8ec2b193b855", "bankName": "BANK NAME", "accountNumber": "1234"}' \
+ http://localhost:5002/api/v1/bank
+ ```
+ - Example Response
+ ```json
+ {
+ "owningUserId": "4b7b2def-e76e-48bf-993b-8ec2b193b855",
+ "bankId": "b920cfc7-c455-4ac6-b856-f9d3a416d9d1",
+ "bankName": "BANK NAME",
+ "accountNumber": "1234"
+ }
+ ```
\ No newline at end of file
diff --git a/aws.go b/aws.go
new file mode 100644
index 0000000..9ba90df
--- /dev/null
+++ b/aws.go
@@ -0,0 +1,47 @@
+/*
+AWS Configuration/Initialization.
+
+Instantiates a session with the AWS SDK for use and opens/exposes a connection to a DynamoDB instance.
+*/
+package main
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+ "github.com/aws/aws-sdk-go-v2/aws/external"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
// AwsConfig abstracts AWS service setup so callers can initialize the SDK
// once at startup and obtain a DynamoDB client without knowing the wiring.
type AwsConfig interface {
	// Init loads AWS configuration/credentials and builds service clients.
	Init()
	// DynamoDbSvc returns the DynamoDB client created by Init.
	DynamoDbSvc() *dynamodb.DynamoDB
}

// awsConf is the default AwsConfig implementation; it holds the DynamoDB
// client built during Init.
type awsConf struct {
	dynamodbSvc *dynamodb.DynamoDB
}
+
+/*
+Initialize the AWS Service.
+
+ Uses the AWS_ACCESS_KEY & AWS_SECRET_KEY values stored in the environment to connect to the AWS Account.
+
+ Once the credentials are loaded, instantiate a new DynamoDB service instance
+*/
+func (c *awsConf) Init() {
+ // establish the aws config with the env access key and secret
+ cfg, err := external.LoadDefaultAWSConfig()
+ if err != nil {
+ panic(err)
+ }
+ cfg.Region = endpoints.UsEast1RegionID
+ // use config to build dynamodb svc
+ c.dynamodbSvc = dynamodb.New(cfg)
+ fmt.Println("AWS Service Initiated")
+}
+
// DynamoDbSvc returns the DynamoDB client built by Init.
// It is nil until Init has been called.
func (c *awsConf) DynamoDbSvc() *dynamodb.DynamoDB {
	return c.dynamodbSvc
}
diff --git a/entities.go b/entities.go
new file mode 100644
index 0000000..060814a
--- /dev/null
+++ b/entities.go
@@ -0,0 +1,14 @@
+package main
+
// Ping is the health-check payload returned by GET /api/v1/ping.
type Ping struct {
	Version string `json:"version"`
	Health  string `json:"health"`
	Msg     string `json:"msg"`
}

// Bank is a user's bank record as stored in the DynamoDB UserBanks table and
// exchanged as JSON over the bank endpoints. OwningUserId and BankId together
// form the table's composite (partition, sort) key.
type Bank struct {
	OwningUserId  string `json:"owningUserId"`
	BankId        string `json:"bankId"`
	BankName      string `json:"bankName"`
	AccountNumber string `json:"accountNumber"`
}
diff --git a/handlers.go b/handlers.go
new file mode 100644
index 0000000..b30bcbe
--- /dev/null
+++ b/handlers.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/satori/go.uuid"
+
+ "github.com/gorilla/mux"
+)
+
+// Http Handler for the Ping Endpoint (/api/v1/ping)
+// When Ping is hit, Call the DoPing() method and return the results.
+func PingHandler(w http.ResponseWriter, r *http.Request) {
+ ping := DoPing()
+ js, err := json.Marshal(ping)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(js)
+ return
+}
+
+// Http Handler for the GetBank endpoint (/api/v1/user/{owningUserId}/bank/{bankId})
+// Grab the owningUserId and bankId out of the route params and use them to get a Bank record
+func GetBankHandler(w http.ResponseWriter, r *http.Request) {
+ params := mux.Vars(r)
+ owningUserId := params["owningUserId"] // get the owning user id from request route params
+ _owningUserId := uuid.FromStringOrNil(owningUserId) // convert string id to uuid
+ bankId := params["bankId"] // get the bank id from the request route params
+ _bankId := uuid.FromStringOrNil(bankId) // convert bank id string to uuid
+ bank, err := GetBank(_owningUserId, _bankId) // get bank record
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ js, err := json.Marshal(bank) // convert bank struct to JSON
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(js)
+ return
+}
+
+// Http Handler for the SaveBank endpoint (/api/v1/bank)
+// Gets the bank out of the request body and saves it
+func SaveBankHandler(w http.ResponseWriter, r *http.Request) {
+ var bank Bank
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ json.Unmarshal(b, &bank)
+ // call save bank service call
+ created, err := bank.SaveBank()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // marshal to JSON && return the created bank
+ js, err := json.Marshal(created) // convert bank struct to JSON
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(js)
+ return
+}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..024bb69
--- /dev/null
+++ b/main.go
@@ -0,0 +1,48 @@
+/*
+Main entry point for Boldly Go RESTful Application.
+
+ Simple REST API to expose a few endpoints to get a feel for GoLang.
+
+ Endpoints:
+ - /api/v1/ping // HEALTH CHECK PING ENDPOINT
+ - /api/v1/user/{owningUserId}/bank/{bankId} // GET THE USERS BANK RECORD
+ - /api/v1/bank // SAVE A BANK RECORD
+*/
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+)
+
+const appPortKey = ":5002"
+
+var awsSvc AwsConfig = &awsConf{}
+
+func main() {
+ // instantiate aws configuration
+ awsSvc.Init()
+ // instantiate mux router
+ router := mux.NewRouter().StrictSlash(true)
+ router.Methods("GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS").Schemes("http")
+ // give me that api path-prefix
+ api := router.PathPrefix("/api").Subrouter()
+ // endpoint registry
+ api.HandleFunc("/v1/ping", PingHandler).Methods("GET")
+ api.HandleFunc("/v1/user/{owningUserId}/bank/{bankId}", GetBankHandler).Methods("GET")
+ api.HandleFunc("/v1/bank", SaveBankHandler).Methods("POST")
+ // add CORS acceptance to all requests
+ handler := handlers.CORS(
+ handlers.AllowedOrigins([]string{"*"}),
+ handlers.AllowedMethods([]string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"}),
+ handlers.AllowedHeaders([]string{"Content-Type", "X-Requested-With", "Accept", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization"}),
+ )(api)
+ // start app
+ fmt.Println(fmt.Sprintf("App Running on Port %s", appPortKey))
+ log.Fatal(http.ListenAndServe(appPortKey, handlers.LoggingHandler(os.Stdout, handler)))
+}
diff --git a/services.go b/services.go
new file mode 100644
index 0000000..183bd82
--- /dev/null
+++ b/services.go
@@ -0,0 +1,67 @@
+package main
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/satori/go.uuid"
+
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute"
+)
+
+func DoPing() Ping {
+ return Ping{
+ Version: "0.0.1",
+ Health: "HEALTHY",
+ Msg: "Looking good, beautiful",
+ }
+}
+
+// Get a unique Bank record by the owningUserId and bankId composite key.
+// Use the AWS DynamoDB service instance to query the UserBanks table in DynamoDB by the Primary & Sort key
+func GetBank(owningUserId, bankId uuid.UUID) (bank *Bank, err error) {
+ req := awsSvc.DynamoDbSvc().GetItemRequest(&dynamodb.GetItemInput{
+ TableName: aws.String("UserBanks"),
+ Key: map[string]dynamodb.AttributeValue{
+ "owningUserId": {
+ S: aws.String(owningUserId.String()),
+ },
+ "bankId": {
+ S: aws.String(bankId.String()),
+ },
+ },
+ })
+ output, err := req.Send()
+ if err != nil {
+ return nil, err
+ }
+ // unmarshal map
+ bank = new(Bank)
+ err = dynamodbattribute.UnmarshalMap(output.Item, &bank)
+ if err != nil {
+ return nil, err
+ }
+ return bank, nil
+}
+
+// Save the Bank record to the UserBanks table in DynamoDB.
+// Use the AWS DynamoDB Service to marshal the bank and save the item in the table
+func (bank *Bank) SaveBank() (*Bank, error) {
+ bank.BankId = uuid.NewV4().String() // set bank id
+ // marshal to map for dynamo input
+ bankMap, err := dynamodbattribute.MarshalMap(bank)
+ if err != nil {
+ return nil, err
+ }
+ // build put item request
+ input := &dynamodb.PutItemInput{
+ Item: bankMap,
+ TableName: aws.String("UserBanks"),
+ }
+ // save item to db
+ req := awsSvc.DynamoDbSvc().PutItemRequest(input)
+ _, err = req.Send()
+ if err != nil {
+ return nil, err
+ }
+ return bank, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go
new file mode 100644
index 0000000..56fdfc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be to used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns an BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go
new file mode 100644
index 0000000..0202a00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short no whitespace phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the
+// error.
+//
+// origErrs is the error objects which will be nested under the new errors to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all request which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // How do we want to handle the array size being zero
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += fmt.Sprintf("%s", e[i].Error())
+ // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this, because unit tests
+ // could be broken with the additional '\n'
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go
new file mode 100644
index 0000000..c78dfac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/chain_provider.go
@@ -0,0 +1,75 @@
+package aws
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Credentials, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a CredentialsProvider is found which returns valid credentials Credentials ChainProvider
+// will cache that CredentialsProvider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next CredentialsProvider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := aws.NewChainCredentials(
+// []aws.CredentialsProvider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(cfg),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// cfg := cfg.Copy()
+// cfg.Credentials = creds
+// svc := ec2.New(cfg)
+//
+type ChainProvider struct {
+ SafeCredentialsProvider
+
+ Providers []CredentialsProvider
+}
+
+// NewChainProvider returns a pointer to a new ChainProvider value wrapping
+// a chain of credentials providers.
+func NewChainProvider(providers []CredentialsProvider) *ChainProvider {
+ p := &ChainProvider{
+ Providers: append([]CredentialsProvider{}, providers...),
+ }
+ p.RetrieveFn = p.retrieveFn
+
+ return p
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) retrieveFn() (Credentials, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+
+ return Credentials{},
+ awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go
new file mode 100644
index 0000000..f5b63bd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/client.go
@@ -0,0 +1,85 @@
+package aws
+
+import (
+ "net/http"
+)
+
+// Metadata wraps immutable data from the Client structure.
+type Metadata struct {
+ ServiceName string
+ APIVersion string
+
+ Endpoint string
+ SigningName string
+ SigningRegion string
+
+ JSONVersion string
+ TargetPrefix string
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ Metadata Metadata
+
+ Config Config
+
+ Region string
+ Credentials CredentialsProvider
+ EndpointResolver EndpointResolver
+ Handlers Handlers
+ Retryer Retryer
+
+ // TODO replace with value not pointer
+ LogLevel LogLevel
+ Logger Logger
+
+ HTTPClient *http.Client
+}
+
+// NewClient will return a pointer to a new initialized service client.
+func NewClient(cfg Config, metadata Metadata) *Client {
+ svc := &Client{
+ Metadata: metadata,
+
+ // TODO remove config when request reqfactored
+ Config: cfg,
+
+ Region: cfg.Region,
+ Credentials: cfg.Credentials,
+ EndpointResolver: cfg.EndpointResolver,
+ Handlers: cfg.Handlers.Copy(),
+ Retryer: cfg.Retryer,
+
+ LogLevel: cfg.LogLevel,
+ Logger: cfg.Logger,
+ }
+
+ retryer := cfg.Retryer
+ if retryer == nil {
+ // TODO need better way of specifing default num retries
+ retryer = DefaultRetryer{NumMaxRetries: 3}
+ }
+ svc.Retryer = retryer
+
+ svc.AddDebugHandlers()
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *Operation, params interface{}, data interface{}) *Request {
+ return New(c.Config, c.Metadata, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFrontNamed(NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+ c.Handlers.Send.PushBackNamed(NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go
new file mode 100644
index 0000000..f3e2e86
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/client_logger.go
@@ -0,0 +1,105 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
+
+func logRequest(r *Request) {
+ logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody)
+ dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.Metadata.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser and will not be reset after read by the HTTP
+ // client reader.
+ r.ResetBody()
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Metadata.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *Request) {
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+
+ handlerFn := func(req *Request) {
+ body, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.Metadata.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.Metadata.ServiceName, req.Operation.Name, err))
+ return
+ }
+ lw.Logger.Log(fmt.Sprintf(logRespMsg, req.Metadata.ServiceName, req.Operation.Name, string(body)))
+ if req.Config.LogLevel.Matches(LogDebugWithHTTPBody) {
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
new file mode 100644
index 0000000..4605415
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -0,0 +1,98 @@
+package aws
+
+import (
+ "net/http"
+)
+
+// A Config provides service configuration for service clients.
+type Config struct {
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+ // information on AWS regions.
+ Region string
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials CredentialsProvider
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver EndpointResolver
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // TODO document
+ Handlers Handlers
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ Retryer Retryer
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel LogLevel
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether or not if request.Retryable is set.
+ // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ //
+ // TODO this config field is depercated and needs removed.
+ EnforceShouldRetryCheck bool
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making
+ // rest protocol requests. Will default to false. This would only be used
+ // for empty directory names in s3 requests.
+ //
+ // Example:
+ // cfg, err := external.LoadDefaultAWSConfig()
+ // cfg.DisableRestProtocolURICleaning = true
+ //
+ // svc := s3.New(cfg)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ //
+ // TODO need better way of representing support for this concept. Not on Config.
+ DisableRestProtocolURICleaning bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c Config) Copy() Config {
+ cp := c
+ cp.Handlers = cp.Handlers.Copy()
+
+ return cp
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go
new file mode 100644
index 0000000..5fe8be6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine,!plan9
+
+package aws
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go
new file mode 100644
index 0000000..ca8422e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/connection_reset_error_other.go
@@ -0,0 +1,11 @@
+// +build appengine plan9
+
+package aws
+
+import (
+ "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+ return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 0000000..79f4268
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,71 @@
+package aws
+
+import (
+ "time"
+)
+
+// Context is an copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as a SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
+
+// SleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Which ever happens first. If the context is canceled the Context's
+// error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go
new file mode 100644
index 0000000..8fdda53
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_6.go
@@ -0,0 +1,41 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go
new file mode 100644
index 0000000..064f75c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context_1_7.go
@@ -0,0 +1,9 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+var (
+ backgroundCtx = context.Background()
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go
new file mode 100644
index 0000000..ff5d58e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/convert_types.go
@@ -0,0 +1,387 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix((*v / 1000), 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// Which includes calling TimeUnixMilli on a zero Time is undefined.
+//
+// This utility is useful for service API's such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 0000000..db93f59
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,138 @@
+package aws
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// NeverExpire is the time identifier used when a credential provider's
+// credentials will not expire. This is used in cases where a non-expiring
+// provider type cannot be used.
+var NeverExpire = time.Unix(math.MaxInt64, 0)
+
+// AnonymousCredentials is an empty CredentialProvider that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// These credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// s3Cfg := cfg.Copy()
+// s3cfg.Credentials = AnonymousCredentials
+//
+// svc := s3.New(s3Cfg)
+var AnonymousCredentials = StaticCredentialsProvider{
+ Value: Credentials{Source: "AnonymousCredentials"},
+}
+
+// An Expiration provides wrapper around time with expiration related methods.
+type Expiration time.Time
+
+// NOTE(review): no Expired method is defined for Expiration in this file,
+// although a comment describing one previously stood here — confirm upstream.
+
+// A Credentials is the AWS credentials value for individual credential fields.
+type Credentials struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Source of the credentials
+ Source string
+
+ // CanExpire reports whether Expires is meaningful; Expires is the time
+ // at which the credentials will expire.
+ CanExpire bool
+ Expires time.Time
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+ if v.CanExpire {
+ return !v.Expires.After(sdk.NowTime())
+ }
+
+ return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+ return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide credentials
+// Credentials. A CredentialsProvider is required to manage its own Expired state, and what to
+// be expired means.
+//
+// The CredentialsProvider should not need to implement its own mutexes, because
+// that will be managed by CredentialsLoader.
+type CredentialsProvider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // Error is returned if the value was not obtainable, or empty.
+ Retrieve() (Credentials, error)
+
+ // TODO should Retrieve take a context?
+}
+
+// SafeCredentialsProvider provides caching and concurrency safe credentials
+// retrieval via the RetrieveFn.
+type SafeCredentialsProvider struct {
+ RetrieveFn func() (Credentials, error)
+
+ creds atomic.Value
+ m sync.Mutex
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved, and not expired the cached credentials will be returned. If the
+// credentials have not been retrieved yet, or expired, RetrieveFn will be called.
+//
+// Returns an error if RetrieveFn returns an error.
+func (p *SafeCredentialsProvider) Retrieve() (Credentials, error) {
+ if creds := p.getCreds(); creds != nil {
+ return *creds, nil
+ }
+
+ p.m.Lock()
+ defer p.m.Unlock()
+
+ // Make sure another goroutine didn't already update the credentials.
+ if creds := p.getCreds(); creds != nil {
+ return *creds, nil
+ }
+
+ creds, err := p.RetrieveFn()
+ if err != nil {
+ return Credentials{}, err
+ }
+ p.creds.Store(&creds)
+
+ return creds, nil
+}
+
+// getCreds returns the cached credentials only if they are present, have
+// keys set, and have not expired; otherwise it returns nil.
+func (p *SafeCredentialsProvider) getCreds() *Credentials {
+ v := p.creds.Load()
+ if v == nil {
+ return nil
+ }
+
+ c := v.(*Credentials)
+ if c != nil && c.HasKeys() && !c.Expired() {
+ return c
+ }
+
+ return nil
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause RetrieveFn to be called.
+func (p *SafeCredentialsProvider) Invalidate() {
+ p.creds.Store((*Credentials)(nil))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go
new file mode 100644
index 0000000..a08af80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/default_retryer.go
@@ -0,0 +1,136 @@
+package aws
+
+import (
+ "math/rand"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// client.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// seededRand is a process-wide, concurrency-safe source of retry jitter.
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *Request) time.Duration {
+ // Set the upper limit of delay in retrying at ~five minutes
+ minTime := 30
+ throttle := d.shouldThrottle(r)
+ if throttle {
+ // Honor a server-provided Retry-After delay when available.
+ if delay, ok := getRetryDelay(r); ok {
+ return delay
+ }
+
+ minTime = 500
+ }
+
+ // Cap the exponent so the backoff delay stays within the upper limit.
+ retryCount := r.RetryCount
+ if retryCount > 13 {
+ retryCount = 13
+ } else if throttle && retryCount > 8 {
+ retryCount = 8
+ }
+
+ delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *Request) bool {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// ShouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *Request) bool {
+ // 429, 502, 503, and 504 are always treated as throttling; the empty
+ // cases break out of the switch (Go does not fall through) and reach
+ // the final return true.
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 502:
+ case 503:
+ case 504:
+ default:
+ return r.IsErrorThrottle()
+ }
+
+ return true
+}
+
+// This will look in the Retry-After header, RFC 7231, for how long
+// it will wait before attempting another request
+func getRetryDelay(r *Request) (time.Duration, bool) {
+ if !canUseRetryAfterHeader(r) {
+ return 0, false
+ }
+
+ delayStr := r.HTTPResponse.Header.Get("Retry-After")
+ if len(delayStr) == 0 {
+ return 0, false
+ }
+
+ delay, err := strconv.Atoi(delayStr)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(delay) * time.Second, true
+}
+
+// Will look at the status code to see if the retry header pertains to
+// the status code.
+func canUseRetryAfterHeader(r *Request) bool {
+ // Retry-After is only honored for 429 and 503 responses; the empty
+ // cases break out of the switch and reach the final return true.
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 503:
+ default:
+ return false
+ }
+
+ return true
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer, serialized
+// through the mutex.
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed re-seeds the underlying source, serialized through the mutex.
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
new file mode 100644
index 0000000..fb92d42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
@@ -0,0 +1,92 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but external.Config
+// instead. This package is useful when you need to reset the defaults
+// of a service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+)
+
+// Logger returns a Logger which will write log messages to stdout, and
+// use same formatting runes as the stdlib log.Logger
+func Logger() aws.Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// is available if you need to reset the configuration of an
+// existing service client.
+func Config() aws.Config {
+ return aws.Config{
+ EndpointResolver: endpoints.NewDefaultResolver(),
+ Credentials: aws.AnonymousCredentials,
+ HTTPClient: HTTPClient(),
+ Logger: Logger(),
+ Handlers: Handlers(),
+ }
+}
+
+// HTTPClient will return a new HTTP Client configured for the SDK.
+//
+// Does not use http.DefaultClient nor http.DefaultTransport.
+func HTTPClient() *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 10,
+ IdleConnTimeout: 30 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 5 * time.Second,
+ },
+ }
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// is available if you need to reset the request handlers of an
+// existing service client.
+func Handlers() aws.Handlers {
+ var handlers aws.Handlers
+
+ // Handlers execute in the order they are pushed within each phase,
+ // so the ordering below matters.
+ handlers.Validate.PushBackNamed(ValidateEndpointHandler)
+ handlers.Validate.PushBackNamed(ValidateParametersHandler)
+ handlers.Validate.AfterEachFn = aws.HandlerListStopOnError
+ handlers.Build.PushBackNamed(SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = aws.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(SendHandler)
+ handlers.AfterRetry.PushBackNamed(AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(ValidateResponseHandler)
+
+ return handlers
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go
new file mode 100644
index 0000000..cebd006
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/handlers.go
@@ -0,0 +1,229 @@
+package defaults
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = aws.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *aws.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ switch body := r.Body.(type) {
+ case nil:
+ length = 0
+ case lener:
+ length = int64(body.Len())
+ case io.Seeker:
+ // Measure length by seeking to the end, then restore position.
+ r.BodyStart, _ = body.Seek(0, 1)
+ end, _ := body.Seek(0, 2)
+ body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+ length = end - r.BodyStart
+ default:
+ panic("Cannot get length of body, must provide `ContentLength`")
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
+
+// reStatusCode extracts a leading 3-digit HTTP status code from an error string.
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent. Or significant delays
+// occur when retrying requests that would cause the signature to expire.
+var ValidateReqSigHandler = aws.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *aws.Request) {
+ // Requests using anonymous credentials are never signed; nothing to do.
+ if r.Config.Credentials == aws.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 10 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(10 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = aws.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *aws.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if aws.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+ // place holder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+ // Use a shallow copy of the http.Request to ensure the race condition
+ // of transport on Body will not trigger
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+// sendFollowRedirects sends the request via the configured HTTP client,
+// which follows redirects per the client's policy.
+func sendFollowRedirects(r *aws.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+// sendWithoutFollowRedirects performs a single round trip, bypassing the
+// client's redirect handling.
+func sendWithoutFollowRedirects(r *aws.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+// handleSendError normalizes transport failures: it ensures r.HTTPResponse is
+// always non-nil and sets the request's error and retryable state.
+func handleSendError(r *aws.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+ // come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+ // Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(aws.ErrCodeRequestCanceled,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = aws.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *aws.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = aws.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *aws.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || r.Config.EnforceShouldRetryCheck {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ // Sleep respects context cancellation; a canceled context aborts the retry.
+ if err := sdk.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(aws.ErrCodeRequestCanceled,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if p, ok := r.Config.Credentials.(sdk.Invalidator); ok && r.IsErrorExpired() {
+ p.Invalidate()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = aws.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *aws.Request) {
+ if r.Metadata.SigningRegion == "" && r.Config.Region == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.Metadata.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go
new file mode 100644
index 0000000..cbac627
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/param_validator.go
@@ -0,0 +1,19 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = aws.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *aws.Request) {
+ // Nothing to validate when the operation has no (filled) input parameters.
+ if !r.ParamsFilled() {
+ return
+ }
+
+ // Only input types implementing aws.Validator are validated.
+ if v, ok := r.Params.(aws.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go
new file mode 100644
index 0000000..c911f06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/user_agent_handlers.go
@@ -0,0 +1,36 @@
+package defaults
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = aws.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: aws.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec_env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent. ("Hander" is misspelled in the
+// exported identifier; it cannot be renamed without breaking callers.)
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = aws.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *aws.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ aws.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value use the "String" function. This makes it easy to
+// to get pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly types used in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// NOTE(review): the paragraph below appears outdated — defaults.HTTPClient in
+// this SDK constructs its own client and explicitly does not use
+// http.DefaultClient or http.DefaultTransport; confirm against upstream.
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go
new file mode 100644
index 0000000..3233acd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &aws.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the service. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &aws.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "user-data"),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ // Map a 404 from the metadata service to a friendlier "NotFoundError".
+ req.Handlers.UnmarshalError.PushBack(func(r *aws.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+
+ return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &aws.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ // NOTE(review): assumes a non-empty availability-zone response; an empty
+ // string here would panic on the slice below — confirm upstream.
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go
new file mode 100644
index 0000000..5c0a7e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2metadata/service.go
@@ -0,0 +1,110 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
+// true instructs the SDK to disable the EC2 Metadata client. The client cannot
+// be used while the environment variable is set to true, (case insensitive).
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// A EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+ *aws.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a Config.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+// // Create a EC2Metadata client from just a config.
+// svc := ec2metadata.New(cfg)
+func New(config aws.Config) *EC2Metadata {
+ svc := &EC2Metadata{
+ Client: aws.NewClient(
+ config,
+ aws.Metadata{
+ ServiceName: ServiceName,
+ APIVersion: "latest",
+ },
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Disable the EC2 Metadata service if the environment variable is set.
+ // This short-circuits the service's functionality to always fail to send
+ // requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(aws.NamedHandler{
+ Name: defaults.SendHandler.Name,
+ Fn: func(r *aws.Request) {
+ r.Error = awserr.New(
+ aws.ErrCodeRequestCanceled,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ return svc
+}
+
+// httpClientZero reports whether c is nil or indistinguishable from a
+// zero-value http.Client.
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+// metadataOutput holds the raw body of a metadata service response.
+type metadataOutput struct {
+ Content string
+}
+
+// unmarshalHandler copies the response body into the request's metadataOutput.
+// NOTE(review): the error strings below misspell "response" as "respose";
+// they are runtime values and are intentionally left untouched here.
+func unmarshalHandler(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+// unmarshalError converts an error response body into the request's error.
+func unmarshalError(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+// validateEndpointHandler ensures the request has an endpoint configured.
+func validateEndpointHandler(r *aws.Request) {
+ if r.Metadata.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go
new file mode 100644
index 0000000..b8290bf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds/provider.go
@@ -0,0 +1,155 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// A Provider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// The NewProvider function must be used to create the Provider.
+//
+// p := &ec2rolecreds.NewProvider(ec2metadata.New(cfg))
+//
+ // // Expire the credentials 10 minutes before IAM states they should. Proactively
+// // refreshing the credentials.
+// p.ExpiryWindow = 10 * time.Minute
+type Provider struct {
+ aws.SafeCredentialsProvider
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewProvider returns an initialized Provider value configured to retrieve
+// credentials from EC2 Instance Metadata service.
+func NewProvider(client *ec2metadata.EC2Metadata) *Provider {
+ p := &Provider{
+ Client: client,
+ }
+ p.RetrieveFn = p.retrieveFn
+
+ return p
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (p *Provider) retrieveFn() (aws.Credentials, error) {
+ credsList, err := requestCredList(p.Client)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ if len(credsList) == 0 {
+ return aws.Credentials{},
+ awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(p.Client, credsName)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: roleCreds.Expiration.Add(-p.ExpiryWindow),
+ }
+
+ return creds, nil
+}
+
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+ // If there are no credentials, or there is an error making or receiving the request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+ // requestCred requests the credentials for a specific credential name from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+ // an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go
new file mode 100644
index 0000000..dac0534
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpointcreds/provider.go
@@ -0,0 +1,167 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ aws.SafeCredentialsProvider
+
+ // The AWS Client to make HTTP requests to the endpoint with. The endpoint
+ // the request will be made to is provided by the aws.Config's
+ // EndpointResolver.
+ Client *aws.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func New(cfg aws.Config) *Provider {
+ p := &Provider{
+ Client: aws.NewClient(
+ cfg,
+ aws.Metadata{
+ ServiceName: ProviderName,
+ },
+ ),
+ }
+ p.RetrieveFn = p.retrieveFn
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+ // was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) retrieveFn() (aws.Credentials, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return aws.Credentials{},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ Source: ProviderName,
+ }
+
+ if resp.Expiration != nil {
+ creds.CanExpire = true
+ creds.Expires = resp.Expiration.Add(-p.ExpiryWindow)
+ }
+
+ return creds, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &aws.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *aws.Request) {
+ if len(r.Metadata.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *aws.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 0000000..3dad528
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,52 @@
+package aws
+
+// EndpointResolver resolves an endpoint for a service endpoint id and region.
+type EndpointResolver interface {
+ ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls EndpointResolverFunc returning the endpoint, or error.
+func (fn EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+ return fn(service, region)
+}
+
+// ResolveWithEndpoint allows a static Resolved Endpoint to be used as an endpoint resolver
+type ResolveWithEndpoint Endpoint
+
+// ResolveWithEndpointURL allows a static URL to be used as a endpoint resolver.
+func ResolveWithEndpointURL(url string) ResolveWithEndpoint {
+ return ResolveWithEndpoint(Endpoint{URL: url})
+}
+
+// ResolveEndpoint returns the static endpoint.
+func (v ResolveWithEndpoint) ResolveEndpoint(service, region string) (Endpoint, error) {
+ e := Endpoint(v)
+ e.SigningRegion = region
+ return e, nil
+}
+
+// Endpoint represents the endpoint a service client should make requests to.
+type Endpoint struct {
+ // The URL of the endpoint.
+ URL string
+
+ // The service name that should be used for signing the requests to the
+ // endpoint.
+ SigningName string
+
+ // The region that should be used for signing the request to the endpoint.
+ SigningRegion string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing the requests to the
+ // endpoint.
+ SigningMethod string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go
new file mode 100644
index 0000000..535f81b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/decode.go
@@ -0,0 +1,143 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions are the options for how the endpoints model definition
+// are decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+ // an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to a EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (*Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (*Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return &Resolver{partitions: ps}, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custSetUnresolveServices(p)
+ }
+
+ return &Resolver{partitions: ps}, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ s, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services["s3"] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custSetUnresolveServices(p *partition) {
+ var ids = map[string]struct{}{
+ "data.iot": {},
+ "cloudsearchdomain": {},
+ }
+ for id := range ids {
+ p.Services[id] = service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ }
+ }
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go
new file mode 100644
index 0000000..11e0f7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/defaults.go
@@ -0,0 +1,3362 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// Service identifiers
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudsearchdomainServiceID = "cloudsearchdomain" // Cloudsearchdomain.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DataIotServiceID = "data.iot" // DataIot.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
+
+// NewDefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func NewDefaultResolver() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() Partitions {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "eu-west-3": region{
+ Description: "EU (Paris)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.mediatailor": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "autoscaling-plans",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearchdomain": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "data.iot": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ca-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "sa-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "Shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudsearchdomain": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "data.iot": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudsearchdomain": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "data.iot": service{
+ Defaults: endpoint{
+ Unresolveable: boxedTrue,
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go
new file mode 100644
index 0000000..faf8723
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/doc.go
@@ -0,0 +1,69 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the NewDefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK. e.g. AWS Standard (aws), AWS China (aws-cn), and
+// AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by NewDefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.NewDefaultResolver()
+// partitions := resolver.Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+// for id, _ := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+// for id, _ := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the service client.
+//
+// In addition the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.ResolveEndpoint, converting it to a type that satisfies the
+// Resolver interface.
+//
+//
+// defaultResolver := endpoints.NewDefaultResolver()
+// myCustomResolver := func(service, region string) (aws.Endpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return aws.Endpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return defaultResolver.ResolveEndpoint(service, region)
+// }
+//
+// cfg, err := external.LoadDefaultAWSConfig()
+// if err != nil {
+// panic(err)
+// }
+// cfg.Region = "us-west-2"
+// cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go
new file mode 100644
index 0000000..194b57b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/endpoints.go
@@ -0,0 +1,344 @@
+package endpoints
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// ResolveOptions provide the configuration needed to direct how the
+// endpoints will be resolved.
+type ResolveOptions struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // UseDualStack sets the resolver to resolve the endpoint as a dualstack
+ // endpoint for the service. If dualstack support for a service is not known
+ // and StrictMatching is not enabled a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+ // StrictMatching enables strict matching of services and regions resolved
+ // endpoints. If the partition doesn't enumerate the exact service and region
+ // an error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+}
+
+// A Resolver provides endpoint resolution based on modeled endpoint data.
+type Resolver struct {
+ // ResolveOptions is embedded so the resolver's options can be set directly
+ // on the Resolver value.
+ ResolveOptions
+
+ partitions partitions
+}
+
+// ResolveEndpoint attempts to resolve an endpoint against the modeled endpoint
+// data. If an endpoint is found it will be returned. An error will be returned
+// otherwise.
+//
+// Searches through the partitions in the order they are defined.
+func (r *Resolver) ResolveEndpoint(service, region string) (aws.Endpoint, error) {
+ return r.partitions.EndpointFor(service, region, r.ResolveOptions)
+}
+
+// Partitions returns the partitions that make up the resolver.
+func (r *Resolver) Partitions() Partitions {
+ return r.partitions.Partitions()
+}
+
+// Partitions is a slice of partitions describing regions and endpoints
+type Partitions []Partition
+
+// ForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func (ps Partitions) ForRegion(id string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[id]; ok || p.p.RegionRegex.MatchString(id) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
+
+// ForPartition returns the partition with the matching ID passed in.
+func (ps Partitions) ForPartition(id string) (Partition, bool) {
+ for _, p := range ps {
+ if p.ID() == id {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// Endpoint attempts to resolve the endpoint based on service and region.
+// See ResolveOptions for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the endpoint will be resolved
+// based on the partition's endpoint pattern, and service endpoint prefix.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) Endpoint(service, region string, opts ResolveOptions) (aws.Endpoint, error) {
+ return p.p.EndpointFor(service, region, opts)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// RegionsForService returns the map of regions for the service id specified.
+// false is returned if the service is not found in the partition.
+func (p Partition) RegionsForService(id string) (map[string]Region, bool) {
+ if _, ok := p.p.Services[id]; !ok {
+ return nil, false
+ }
+
+ s := Service{
+ id: id,
+ p: p.p,
+ }
+ return s.Regions(), true
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// Resolver returns an endpoint resolver for the partitions. Use this to satisfy
+// the SDK's EndpointResolver.
+//
+// The returned Resolver is created with zero-value ResolveOptions; set the
+// embedded options on the result to change resolution behavior.
+func (p Partition) Resolver() *Resolver {
+ return &Resolver{
+ partitions: partitions{*p.p},
+ }
+}
+
+// A Region provides information about a region, and ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ // NOTE(review): desc is never populated or read within this file — confirm
+ // it is still needed before relying on it.
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Endpoint resolves an endpoint from the context of the region given
+// a service. See Partition.Endpoint for usage and errors that can be returned.
+func (r Region) Endpoint(service string, opts ResolveOptions) (aws.Endpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ // Only include services that model an explicit endpoint for this region.
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// Endpoint resolves an endpoint from the context of a service given
+// a region. See Partition.Endpoint for usage and errors that can be returned.
+func (s Service) Endpoint(region string, opts ResolveOptions) (aws.Endpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ // Skip endpoint IDs (e.g. FIPS variants) that are not real region IDs.
+ if _, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// Resolve resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.Endpoint for usage and
+// errors that can be returned.
+func (e Endpoint) Resolve(opts ResolveOptions) (aws.Endpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts)
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error, satisfying the
+// error interface.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error, satisfying the
+// error interface.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go
new file mode 100644
index 0000000..d3bcbb8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model.go
@@ -0,0 +1,313 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// partitions is the ordered collection of modeled partitions searched when
+// resolving an endpoint.
+type partitions []partition
+
+// EndpointFor resolves an endpoint from the first partition that can resolve
+// the service/region pair. Without StrictMatching the first partition's
+// endpoint format is used as a fallback when no partition matches.
+func (ps partitions) EndpointFor(service, region string, opts ResolveOptions) (aws.Endpoint, error) {
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opts.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts)
+ }
+
+ // If loose matching fallback to first partition format to use
+ // when resolving the endpoint.
+ if !opts.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts)
+ }
+
+ return aws.Endpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() Partitions {
+ parts := make(Partitions, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+// partition is the internal JSON-modeled representation of an AWS partition.
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+// Partition wraps the partition in the exported Partition type.
+// Note the value receiver: the returned Partition points at a copy of p.
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
+// canResolveEndpoint reports whether this partition models the service/region
+// pair exactly, or — when strict matching is off — whether the region at
+// least matches the partition's region pattern.
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+// EndpointFor resolves the endpoint for the service/region pair within this
+// partition. In StrictMatching mode unknown services or regions produce
+// UnknownServiceError/UnknownEndpointError; otherwise a generic endpoint is
+// built from the partition and service defaults.
+func (p partition) EndpointFor(service, region string, opts ResolveOptions) (resolved aws.Endpoint, err error) {
+ s, hasService := p.Services[service]
+ if !hasService && opts.StrictMatching {
+ // Only return error if the resolver will not fallback to creating
+ // endpoint based on service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opts.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ // Later defaults and the endpoint itself take precedence during resolve.
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opts), nil
+}
+
+// serviceList returns the IDs of all services modeled in ss.
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+
+// endpointList returns the IDs of all endpoints modeled in es.
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+// regionRegex wraps regexp.Regexp so the partition's region pattern can be
+// decoded directly from its JSON string form.
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+// UnmarshalJSON decodes a JSON string into a compiled regular expression.
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+// regions maps a region ID to its modeled metadata.
+type regions map[string]region
+
+// region holds the modeled metadata for a single region.
+type region struct {
+ Description string `json:"description"`
+}
+
+// services maps a service ID to its modeled metadata.
+type services map[string]service
+
+// service is the internal JSON-modeled representation of a service's
+// endpoints within a partition.
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+// endpointForRegion returns the modeled endpoint for the region, and whether
+// one was found. Non-regionalized services resolve only through their single
+// partition endpoint.
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint, return
+ // blank that will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+// endpoints maps an endpoint ID (usually a region ID) to its model.
+type endpoints map[string]endpoint
+
+// endpoint is the internal JSON-modeled representation of a single endpoint.
+type endpoint struct {
+ // True if the endpoint cannot be resolved for this partition/region/service
+ Unresolveable boxedBool `json:"-"`
+
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // SignatureVersions selects the SigningMethod during resolve (see
+ // endpoint.resolve), despite the original "not used" note.
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+// getByPriority returns the first element of p found in s. If s is empty def
+// is returned; if no priority entry matches, the first element of s is used.
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+// resolve merges this endpoint over the supplied defaults (entries later in
+// defs, then e itself, take precedence) and materializes an aws.Endpoint,
+// expanding the {service}, {region}, and {dnsSuffix} placeholders in the
+// hostname template.
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts ResolveOptions) aws.Endpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ var u string
+ if e.Unresolveable != boxedTrue {
+ // Only attempt to resolve the endpoint if it can be resolved.
+ hostname := e.Hostname
+
+ // Offset the hostname for dualstack if enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u = strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+ }
+
+ // Fall back to the requested region/service when the model does not
+ // specify an explicit credential scope.
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ return aws.Endpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+// getEndpointScheme returns "http" when SSL is disabled, otherwise the
+// highest-priority protocol the endpoint supports (defaulting to https).
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+// mergeIn copies every set (non-zero/non-unset) field of other over e,
+// leaving unset fields of other untouched on e.
+func (e *endpoint) mergeIn(other endpoint) {
+ if other.Unresolveable != boxedBoolUnset {
+ e.Unresolveable = other.Unresolveable
+ }
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+// credentialScope models the signing region/service overrides for an endpoint.
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+// boxedBool is a tri-state boolean, so "unset" can be distinguished from an
+// explicit false when decoding and merging endpoint models.
+type boxedBool int
+
+// UnmarshalJSON decodes a JSON boolean into boxedTrue/boxedFalse.
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ // boxedBoolUnset is the zero value, so undecoded fields read as unset.
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go
new file mode 100644
index 0000000..033afb2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,330 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// A CodeGenOptions are the options for code generating the endpoints into
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel given a endpoints model file will decode it and attempt to
+// generate Go code from the model definition. Error will be returned if
+// the code is unable to be generated, or decoded.
+//
+// DecodeModel and DecodeModelOptions are declared elsewhere in this package.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ // Render the decoded partitions through the "defaults" template below.
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver.partitions); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+// toSymbol converts v into a valid Go identifier fragment by title-casing it
+// and dropping any non-alphanumeric runes.
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+// quoteString returns v as a quoted Go string literal.
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+// regionConstName builds the constant name for a partition/region pair.
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+// partitionGetter builds the exported getter name for a partition ID.
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+// partitionVarName builds the unexported variable name for a partition ID.
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+// listPartitionNames formats the partition names as an English list
+// ("A", "A and B", or "A, B, and C").
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+// boxedBoolIfSet formats msg with the boxedBool's symbolic name, or returns
+// the empty string when the value is unset.
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+// stringIfSet formats msg with v, or returns the empty string when v is empty.
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+// stringSliceIfSet formats msg with vs rendered as a comma-separated list of
+// quoted strings, or returns the empty string when vs is empty.
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+// endpointIsSet reports whether v differs from the zero endpoint.
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+// serviceSet returns the union of service IDs across all partitions.
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+// funcMap exposes the helpers above to the v3Tmpl template.
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+// v3Tmpl is the text/template source used by CodeGenModel to render the
+// decoded endpoints model as Go code. The template body is runtime data and
+// must not be annotated inline — doing so would change the generated output.
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" . }}
+
+ {{ range $_, $partition := . }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ template "service consts" . }}
+
+ {{ template "endpoint resolvers" . }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // NewDefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func NewDefaultResolver() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() Partitions {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ BoxedBoolIfSet "Unresolveable: %s,\n" .Unresolveable -}}
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 0000000..c567137
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go-v2/aws/awserr"
+
+// Sentinel errors shared across the aws package's config/endpoint handling.
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ //
+ // @readonly
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ //
+ // @readonly
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go
new file mode 100644
index 0000000..5753b43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/config.go
@@ -0,0 +1,132 @@
+package external
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// DefaultConfigLoaders are a slice of functions that will read external configuration
+// sources for configuration values. These values are read by the AWSConfigResolvers
+// using interfaces to extract specific information from the external configuration.
+var DefaultConfigLoaders = []ConfigLoader{
+ LoadEnvConfig,
+ LoadSharedConfigIgnoreNotExist,
+}
+
+// DefaultAWSConfigResolvers are a slice of functions that will resolve external
+// configuration values into AWS configuration values.
+//
+// This will setup the AWS configuration's Region,
+var DefaultAWSConfigResolvers = []AWSConfigResolver{
+ ResolveDefaultAWSConfig,
+ ResolveCustomCABundle,
+
+ ResolveRegion,
+
+ ResolveFallbackEC2Credentials, // Initial default credentials provider.
+ ResolveCredentialsValue,
+ ResolveEndpointCredentials,
+ ResolveContainerEndpointPathCredentials, // TODO is this order right?
+ ResolveAssumeRoleCredentials,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract
+//
+// Generally, the Config type will use type assertion against the Provider interfaces
+// to extract specific data from the Config.
+type Config interface{}
+
+// A ConfigLoader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration
+// or the configuration data is malformed, or required components missing.
+type ConfigLoader func(Configs) (Config, error)
+
+// An AWSConfigResolver will extract configuration data from the Configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, the
+// data is malformed, or incomplete.
+type AWSConfigResolver func(cfg *aws.Config, configs Configs) error
+
+// Configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type Configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned Configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs Configs) AppendFromLoaders(loaders []ConfigLoader) (Configs, error) {
+ for _, fn := range loaders {
+ cfg, err := fn(cs)
+ if err != nil {
+ return nil, err
+ }
+
+ cs = append(cs, cfg)
+ }
+
+ return cs, nil
+}
+
+// ResolveAWSConfig returns a AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
+func (cs Configs) ResolveAWSConfig(resolvers []AWSConfigResolver) (aws.Config, error) {
+ var cfg aws.Config
+
+ for _, fn := range resolvers {
+ if err := fn(&cfg, cs); err != nil {
+ // TODO provide better error?
+ return aws.Config{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+// LoadDefaultAWSConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the Configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+// cfg, err := external.LoadDefaultAWSConfig(
+// WithSharedConfigProfile("test-profile"),
+// )
+// if err != nil {
+// panic(fmt.Sprintf("failed loading config, %v", err))
+// }
+//
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultAWSConfig(configs ...Config) (aws.Config, error) {
+ var cfgs Configs
+ cfgs = append(cfgs, configs...)
+
+ cfgs, err := cfgs.AppendFromLoaders(DefaultConfigLoaders)
+ if err != nil {
+ return aws.Config{}, err
+ }
+
+ return cfgs.ResolveAWSConfig(DefaultAWSConfigResolvers)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go
new file mode 100644
index 0000000..902252c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/env_config.go
@@ -0,0 +1,236 @@
+package external
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// CredentialsSourceName provides a name of the provider when config is
+// loaded from environment.
+const CredentialsSourceName = "EnvConfigCredentials"
+
+// Environment variables that will be read for configuration values.
+const (
+ AWSAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+ AWSAccessKeyEnvVar = "AWS_ACCESS_KEY"
+
+ AWSSecreteAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+ AWSSecreteKeyEnvVar = "AWS_SECRET_KEY"
+
+ AWSSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+ AWSCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+
+ // TODO shorter name?
+ AWSContainerCredentialsEndpointPathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+
+ AWSRegionEnvVar = "AWS_REGION"
+ AWSDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+
+ AWSProfileEnvVar = "AWS_PROFILE"
+ AWSDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+
+ AWSSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+
+ AWSConfigFileEnvVar = "AWS_CONFIG_FILE"
+
+ AWSCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+)
+
+var (
+ credAccessEnvKeys = []string{
+ AWSAccessKeyIDEnvVar,
+ AWSAccessKeyEnvVar,
+ }
+ credSecretEnvKeys = []string{
+ AWSSecreteAccessKeyEnvVar,
+ AWSSecreteKeyEnvVar,
+ }
+ regionEnvKeys = []string{
+ AWSRegionEnvVar,
+ AWSDefaultRegionEnvVar,
+ }
+ profileEnvKeys = []string{
+ AWSProfileEnvVar,
+ AWSDefaultProfileEnvVar,
+ }
+)
+
+// EnvConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type EnvConfig struct {
+ // Environment configuration values. If set both Access Key ID and Secret Access
+ // Key must be provided. A Session Token can optionally also be provided, but is
+ // not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Credentials aws.Credentials
+
+ // TODO doc
+ CredentialsEndpoint string
+
+ // TODO doc, shorter name?
+ ContainerCredentialsEndpointPath string
+
+ // Region value will instruct the SDK where to make service API requests to. If is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-west-2
+ // AWS_DEFAULT_REGION=us-west-2
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ // AWS_DEFAULT_PROFILE=my_profile
+ SharedConfigProfile string
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the config. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+}
+
+// LoadEnvConfig reads configuration values from the OS's environment variables.
+// Returning a Config-typed EnvConfig to satisfy the ConfigLoader func type.
+func LoadEnvConfig(cfgs Configs) (Config, error) {
+ return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+ var cfg EnvConfig
+
+ creds := aws.Credentials{
+ Source: CredentialsSourceName,
+ }
+ setFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+ setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+ if creds.HasKeys() {
+ creds.SessionToken = os.Getenv(AWSSessionTokenEnvVar)
+ cfg.Credentials = creds
+ }
+
+ cfg.CredentialsEndpoint = os.Getenv(AWSCredentialsEndpointEnvVar)
+ cfg.ContainerCredentialsEndpointPath = os.Getenv(AWSContainerCredentialsEndpointPathEnvVar)
+
+ setFromEnvVal(&cfg.Region, regionEnvKeys)
+ setFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+ cfg.SharedCredentialsFile = os.Getenv(AWSSharedCredentialsFileEnvVar)
+ cfg.SharedConfigFile = os.Getenv(AWSConfigFileEnvVar)
+
+ cfg.CustomCABundle = os.Getenv(AWSCustomCABundleEnvVar)
+
+ return cfg, nil
+}
+
+// GetRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) GetRegion() (string, error) {
+ return c.Region, nil
+}
+
+// GetCredentialsValue returns the AWS Credentials if both AccessKeyID and SecretAccessKey
+// are set in the environment. Returns a zero value Credentials if not set.
+func (c EnvConfig) GetCredentialsValue() (aws.Credentials, error) {
+ return c.Credentials, nil
+}
+
+// GetSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) GetSharedConfigProfile() (string, error) {
+ return c.SharedConfigProfile, nil
+}
+
+// GetCredentialsEndpoint returns the credentials endpoint string if set.
+func (c EnvConfig) GetCredentialsEndpoint() (string, error) {
+ return c.CredentialsEndpoint, nil
+}
+
+// GetContainerCredentialsEndpointPath returns the container credentials endpoint
+// path string if set.
+func (c EnvConfig) GetContainerCredentialsEndpointPath() (string, error) {
+ return c.ContainerCredentialsEndpointPath, nil
+}
+
+// GetSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+// * Shared Config
+func (c EnvConfig) GetSharedConfigFiles() ([]string, error) {
+ files := make([]string, 0, 2)
+ if v := c.SharedCredentialsFile; len(v) > 0 {
+ files = append(files, v)
+ }
+ if v := c.SharedConfigFile; len(v) > 0 {
+ files = append(files, v)
+ }
+
+ return files, nil
+}
+
+// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file path was set, or nil.
+func (c EnvConfig) GetCustomCABundle() ([]byte, error) {
+ if len(c.CustomCABundle) == 0 {
+ return nil, nil
+ }
+
+ return ioutil.ReadFile(c.CustomCABundle)
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go
new file mode 100644
index 0000000..71e9830
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/http_client.go
@@ -0,0 +1,42 @@
+package external
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+func addHTTPClientCABundle(client *http.Client, pemCerts []byte) error {
+ var t *http.Transport
+
+ switch v := client.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if client.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to set custom CA bundle trasnsport must be http.Transport type", nil)
+ }
+ }
+
+ if t == nil {
+ t = &http.Transport{}
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ if t.TLSClientConfig.RootCAs == nil {
+ t.TLSClientConfig.RootCAs = x509.NewCertPool()
+ }
+
+ if !t.TLSClientConfig.RootCAs.AppendCertsFromPEM(pemCerts) {
+ return awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", nil)
+ }
+
+ client.Transport = t
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go
new file mode 100644
index 0000000..a555f42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/local.go
@@ -0,0 +1,49 @@
+package external
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func validateLocalURL(v string) error {
+ u, err := url.Parse(v)
+ if err != nil {
+ return err
+ }
+
+ host := aws.URLHostname(u)
+ if len(host) == 0 {
+ return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+ } else if isLoopback, err := isLoopbackHost(host); err != nil {
+ return fmt.Errorf("failed to resolve host %q, %v", host, err)
+ } else if !isLoopback {
+ return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go
new file mode 100644
index 0000000..54fe932
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/provider.go
@@ -0,0 +1,325 @@
+package external
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+)
+
+// SharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type SharedConfigProfileProvider interface {
+ GetSharedConfigProfile() (string, error)
+}
+
+// WithSharedConfigProfile wraps a string to satisfy the SharedConfigProfileProvider
+// interface so a custom shared config profile name is used when loading the
+// SharedConfig.
+type WithSharedConfigProfile string
+
+// GetSharedConfigProfile returns the shared config profile.
+func (c WithSharedConfigProfile) GetSharedConfigProfile() (string, error) {
+ return string(c), nil
+}
+
+// GetSharedConfigProfile searches the Configs for a SharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetSharedConfigProfile(configs Configs) (string, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(SharedConfigProfileProvider); ok {
+ v, err := p.GetSharedConfigProfile()
+ if err != nil {
+ return "", false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return "", false, nil
+}
+
+// SharedConfigFilesProvider provides access to the shared config filenames
+// external configuration value.
+type SharedConfigFilesProvider interface {
+ GetSharedConfigFiles() ([]string, error)
+}
+
+// WithSharedConfigFiles wraps a slice of strings to satisfy the
+// SharedConfigFilesProvider interface so a slice of custom shared config files
+// are used when loading the SharedConfig.
+type WithSharedConfigFiles []string
+
+// GetSharedConfigFiles returns the slice of shared config files.
+func (c WithSharedConfigFiles) GetSharedConfigFiles() ([]string, error) {
+ return []string(c), nil
+}
+
+// GetSharedConfigFiles searches the Configs for a SharedConfigFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetSharedConfigFiles(configs Configs) ([]string, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(SharedConfigFilesProvider); ok {
+ v, err := p.GetSharedConfigFiles()
+ if err != nil {
+ return nil, false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return nil, false, nil
+}
+
+// CustomCABundleProvider provides access to the custom CA bundle PEM bytes.
+type CustomCABundleProvider interface {
+ GetCustomCABundle() ([]byte, error)
+}
+
+// WithCustomCABundle provides wrapping of CA bundle PEM bytes to satisfy the
+// CustomCABundleProvider interface.
+type WithCustomCABundle []byte
+
+// GetCustomCABundle returns the CA bundle PEM bytes.
+func (v WithCustomCABundle) GetCustomCABundle() ([]byte, error) {
+ return []byte(v), nil
+}
+
+// GetCustomCABundle searches the Configs for a CustomCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetCustomCABundle(configs Configs) ([]byte, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(CustomCABundleProvider); ok {
+ v, err := p.GetCustomCABundle()
+ if err != nil {
+ return nil, false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return nil, false, nil
+}
+
+// RegionProvider provides access to the region external configuration value.
+type RegionProvider interface {
+ GetRegion() (string, error)
+}
+
+// WithRegion provides wrapping of a region string to satisfy the RegionProvider
+// interface.
+type WithRegion string
+
+// GetRegion returns the region string.
+func (v WithRegion) GetRegion() (string, error) {
+ return string(v), nil
+}
+
+// GetRegion searches the Configs for a RegionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func GetRegion(configs Configs) (string, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(RegionProvider); ok {
+ v, err := p.GetRegion()
+ if err != nil {
+ return "", false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return "", false, nil
+}
+
+// CredentialsValueProvider provides access to the credentials external
+// configuration value.
+type CredentialsValueProvider interface {
+ GetCredentialsValue() (aws.Credentials, error)
+}
+
+// WithCredentialsValue provides wrapping of a credentials Value to satisfy the
+// CredentialsValueProvider interface.
+type WithCredentialsValue aws.Credentials
+
+// GetCredentialsValue returns the credentials value.
+func (v WithCredentialsValue) GetCredentialsValue() (aws.Credentials, error) {
+ return aws.Credentials(v), nil
+}
+
+// GetCredentialsValue searches the Configs for a CredentialsValueProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetCredentialsValue(configs Configs) (aws.Credentials, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(CredentialsValueProvider); ok {
+ v, err := p.GetCredentialsValue()
+ if err != nil {
+ return aws.Credentials{}, false, err
+ }
+ if v.HasKeys() {
+ return v, true, nil
+ }
+ }
+ }
+
+ return aws.Credentials{}, false, nil
+}
+
+// CredentialsEndpointProvider provides access to the credentials endpoint
+// external configuration value.
+type CredentialsEndpointProvider interface {
+ GetCredentialsEndpoint() (string, error)
+}
+
+// WithCredentialsEndpoint provides wrapping of a string to satisfy the
+// CredentialsEndpointProvider interface.
+type WithCredentialsEndpoint string
+
+// GetCredentialsEndpoint returns the endpoint.
+func (p WithCredentialsEndpoint) GetCredentialsEndpoint() (string, error) {
+ return string(p), nil
+}
+
+// GetCredentialsEndpoint searches the Configs for a CredentialsEndpointProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetCredentialsEndpoint(configs Configs) (string, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(CredentialsEndpointProvider); ok {
+ v, err := p.GetCredentialsEndpoint()
+ if err != nil {
+ return "", false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return "", false, nil
+}
+
+// ContainerCredentialsEndpointPathProvider provides access to the credentials endpoint path
+// external configuration value.
+type ContainerCredentialsEndpointPathProvider interface {
+ GetContainerCredentialsEndpointPath() (string, error)
+}
+
+// WithContainerCredentialsEndpointPath provides wrapping of a string to satisfy the
+// ContainerCredentialsEndpointPathProvider interface.
+type WithContainerCredentialsEndpointPath string
+
+// GetContainerCredentialsEndpointPath returns the endpoint path.
+func (p WithContainerCredentialsEndpointPath) GetContainerCredentialsEndpointPath() (string, error) {
+ return string(p), nil
+}
+
+// GetContainerCredentialsEndpointPath searches the Configs for a
+// ContainerCredentialsEndpointPathProvider and returns the value if found.
+// Returns an error if a provider fails before a
+// value is found.
+func GetContainerCredentialsEndpointPath(configs Configs) (string, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(ContainerCredentialsEndpointPathProvider); ok {
+ v, err := p.GetContainerCredentialsEndpointPath()
+ if err != nil {
+ return "", false, err
+ }
+ if len(v) > 0 {
+ return v, true, nil
+ }
+ }
+ }
+
+ return "", false, nil
+}
+
+// AssumeRoleConfigProvider provides access to the assume role config
+// external configuration value.
+type AssumeRoleConfigProvider interface {
+ GetAssumeRoleConfig() (AssumeRoleConfig, error)
+}
+
+// WithAssumeRoleConfig provides wrapping of an AssumeRoleConfig to satisfy the
+// AssumeRoleConfigProvider interface.
+type WithAssumeRoleConfig AssumeRoleConfig
+
+// GetAssumeRoleConfig returns the AssumeRoleConfig.
+func (p WithAssumeRoleConfig) GetAssumeRoleConfig() (AssumeRoleConfig, error) {
+ return AssumeRoleConfig(p), nil
+}
+
+// GetAssumeRoleConfig searches the Configs for a AssumeRoleConfigProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetAssumeRoleConfig(configs Configs) (AssumeRoleConfig, bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(AssumeRoleConfigProvider); ok {
+ v, err := p.GetAssumeRoleConfig()
+ if err != nil {
+ return AssumeRoleConfig{}, false, err
+ }
+ if len(v.RoleARN) > 0 && v.Source != nil {
+ return v, true, nil
+ }
+ }
+ }
+
+ return AssumeRoleConfig{}, false, nil
+}
+
+// MFATokenFuncProvider provides access to the MFA token function needed for
+// Assume Role with MFA.
+type MFATokenFuncProvider interface {
+ GetMFATokenFunc() (func() (string, error), error)
+}
+
+// WithMFATokenFunc provides wrapping of a token function to satisfy the
+// MFATokenFuncProvider interface.
+type WithMFATokenFunc func() (string, error)
+
+// GetMFATokenFunc returns the MFA Token function.
+func (p WithMFATokenFunc) GetMFATokenFunc() (func() (string, error), error) {
+ return p, nil
+}
+
+// GetMFATokenFunc searches the Configs for a MFATokenFuncProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func GetMFATokenFunc(configs Configs) (func() (string, error), bool, error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(MFATokenFuncProvider); ok {
+ v, err := p.GetMFATokenFunc()
+ if err != nil {
+ return nil, false, err
+ }
+ if v != nil {
+ return v, true, nil
+ }
+ }
+ }
+
+ return nil, false, nil
+}
+
+// WithEC2MetadataRegion provides a RegionProvider that retrieves the region
+// from the EC2 Metadata service.
+//
+// TODO add this provider to the default config loading?
+type WithEC2MetadataRegion struct {
+ Client *ec2metadata.EC2Metadata
+}
+
+// GetRegion attempts to retrieve the region from the EC2 Metadata service.
+func (p WithEC2MetadataRegion) GetRegion() (string, error) {
+ return p.Client.Region()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go
new file mode 100644
index 0000000..9eff51a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/resolve.go
@@ -0,0 +1,216 @@
+package external
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/aws/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/aws/stscreds"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+// ResolveDefaultAWSConfig will write default configuration values into the cfg
+// value. It will write the default values, overwriting any previous value.
+//
+// This should be used as the first resolver in the slice of resolvers when
+// resolving external configuration.
+func ResolveDefaultAWSConfig(cfg *aws.Config, configs Configs) error {
+ *cfg = defaults.Config()
+ return nil
+}
+
+// ResolveCustomCABundle extracts the first instance of a custom CA bundle filename
+// from the external configurations. It will update the HTTP Client's builder
+// to be configured with the custom CA bundle.
+//
+// Config provider used:
+// * CustomCABundleProvider
+func ResolveCustomCABundle(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetCustomCABundle(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ return addHTTPClientCABundle(cfg.HTTPClient, v)
+}
+
+// ResolveRegion extracts the first instance of a Region from the Configs slice.
+//
+// Config providers used:
+// * RegionProvider
+func ResolveRegion(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetRegion(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Region = v
+ return nil
+}
+
+// ResolveCredentialsValue extracts the first instance of Credentials from the
+// config slices.
+//
+// Config providers used:
+// * CredentialsValueProvider
+func ResolveCredentialsValue(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetCredentialsValue(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Credentials = aws.StaticCredentialsProvider{Value: v}
+
+ return nil
+}
+
+// ResolveEndpointCredentials will extract the credentials endpoint from the config
+// slice. Using the endpoint, provided, to create a endpoint credential provider.
+//
+// Config providers used:
+// * CredentialsEndpointProvider
+func ResolveEndpointCredentials(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetCredentialsEndpoint(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ if err := validateLocalURL(v); err != nil {
+ return err
+ }
+
+ cfgCp := cfg.Copy()
+ cfgCp.EndpointResolver = aws.ResolveWithEndpointURL(v)
+
+ provider := endpointcreds.New(cfgCp)
+ provider.ExpiryWindow = 5 * time.Minute
+
+ cfg.Credentials = provider
+
+ return nil
+}
+
+const containerCredentialsEndpoint = "http://169.254.170.2"
+
+// ResolveContainerEndpointPathCredentials will extract the container credentials
+// endpoint from the config slice. Using the endpoint provided, to create a
+// endpoint credential provider.
+//
+// Config providers used:
+// * ContainerCredentialsEndpointPathProvider
+func ResolveContainerEndpointPathCredentials(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetContainerCredentialsEndpointPath(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfgCp := cfg.Copy()
+
+ v = containerCredentialsEndpoint + v
+ cfgCp.EndpointResolver = aws.ResolveWithEndpointURL(v)
+
+ provider := endpointcreds.New(cfgCp)
+ provider.ExpiryWindow = 5 * time.Minute
+
+ cfg.Credentials = provider
+
+ return nil
+}
+
+// ResolveAssumeRoleCredentials extracts the assume role configuration from
+// the external configurations.
+//
+// Config providers used:
+func ResolveAssumeRoleCredentials(cfg *aws.Config, configs Configs) error {
+ v, found, err := GetAssumeRoleConfig(configs)
+ if err != nil {
+ // TODO error handling, What is the best way to handle this?
+ // capture previous errors continue. error out if all errors
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfgCp := cfg.Copy()
+ // TODO support additional credential providers that are already set?
+ cfgCp.Credentials = aws.StaticCredentialsProvider{Value: v.Source.Credentials}
+
+ provider := stscreds.NewAssumeRoleProvider(
+ sts.New(cfgCp), v.RoleARN,
+ )
+ provider.RoleSessionName = v.RoleSessionName
+
+ if id := v.ExternalID; len(id) > 0 {
+ provider.ExternalID = aws.String(id)
+ }
+ if len(v.MFASerial) > 0 {
+ tp, foundTP, err := GetMFATokenFunc(configs)
+ if err != nil {
+ return err
+ }
+ if !foundTP {
+ return fmt.Errorf("token provider required for AssumeRole with MFA")
+ }
+ provider.SerialNumber = aws.String(v.MFASerial)
+ provider.TokenProvider = tp
+ }
+
+ cfg.Credentials = provider
+
+ return nil
+}
+
+// ResolveFallbackEC2Credentials will configure the AWS config credentials to
+// use EC2 Instance Role always.
+func ResolveFallbackEC2Credentials(cfg *aws.Config, configs Configs) error {
+ cfgCp := cfg.Copy()
+ cfgCp.HTTPClient = shallowCopyHTTPClient(cfgCp.HTTPClient)
+ cfgCp.HTTPClient.Timeout = 5 * time.Second
+
+ provider := ec2rolecreds.NewProvider(ec2metadata.New(cfgCp))
+ provider.ExpiryWindow = 5 * time.Minute
+
+ cfg.Credentials = provider
+
+ return nil
+}
+
+func shallowCopyHTTPClient(client *http.Client) *http.Client {
+ return &http.Client{
+ Transport: client.Transport,
+ CheckRedirect: client.CheckRedirect,
+ Jar: client.Jar,
+ Timeout: client.Timeout,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go
new file mode 100644
index 0000000..c6dc624
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/external/shared_config.go
@@ -0,0 +1,425 @@
+package external
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/internal/ini"
+)
+
// INI keys recognized in the shared credentials/config files. Keys marked
// "group required" must all be present for their group to take effect.
const (
	// Static Credentials group
	accessKeyIDKey  = `aws_access_key_id`     // group required
	secretAccessKey = `aws_secret_access_key` // group required
	sessionTokenKey = `aws_session_token`     // optional

	// Assume Role Credentials group
	roleArnKey         = `role_arn`          // group required
	sourceProfileKey   = `source_profile`    // group required
	externalIDKey      = `external_id`       // optional
	mfaSerialKey       = `mfa_serial`        // optional
	roleSessionNameKey = `role_session_name` // optional

	// Additional Config fields
	regionKey = `region`
)
+
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
//
// Exposed as a var (not a const) so callers can override the default.
var DefaultSharedConfigProfile = `default`
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+ return filepath.Join(userHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+ return filepath.Join(userHomeDir(), ".aws", "config")
+}
+
// DefaultSharedConfigFiles is a slice of the default shared config files that
// the will be used in order to load the SharedConfig.
//
// NOTE: the paths are computed once at package initialization; changes to
// HOME/USERPROFILE after init are not reflected here.
var DefaultSharedConfigFiles = []string{
	DefaultSharedCredentialsFilename(),
	DefaultSharedConfigFilename(),
}
+
// AssumeRoleConfig provides the values defining the configuration for an IAM
// assume role.
type AssumeRoleConfig struct {
	RoleARN         string // role_arn
	ExternalID      string // external_id (optional)
	MFASerial       string // mfa_serial (optional)
	RoleSessionName string // role_session_name (optional)

	// sourceProfile is the source_profile key's value; it names the profile
	// whose credentials are used to perform the AssumeRole call.
	sourceProfile string
	// Source is the resolved configuration of sourceProfile; populated by
	// setAssumeRoleSource.
	Source *SharedConfig
}
+
// SharedConfig represents the configuration fields of the SDK config files.
type SharedConfig struct {
	// Profile is the profile name these values were loaded from.
	Profile string

	// Credentials values from the config file. Both aws_access_key_id
	// and aws_secret_access_key must be provided together in the same file
	// to be considered valid. The values will be ignored if not a complete group.
	// aws_session_token is an optional field that can be provided if both of the
	// other two fields are also provided.
	//
	//	aws_access_key_id
	//	aws_secret_access_key
	//	aws_session_token
	Credentials aws.Credentials

	// AssumeRole holds the role_arn/source_profile group, if present.
	AssumeRole AssumeRoleConfig

	// Region is the region the SDK should use for looking up AWS service endpoints
	// and signing requests.
	//
	//	region
	Region string
}
+
// GetRegion returns the region for the profile if a region is set.
// Never returns an error; the error return satisfies the provider interface.
func (c SharedConfig) GetRegion() (string, error) {
	return c.Region, nil
}

// GetCredentialsValue returns the credentials for a profile if they were set.
// Never returns an error; the error return satisfies the provider interface.
func (c SharedConfig) GetCredentialsValue() (aws.Credentials, error) {
	return c.Credentials, nil
}

// GetAssumeRoleConfig returns the assume role config for a profile. Will be
// a zero value if not set.
func (c SharedConfig) GetAssumeRoleConfig() (AssumeRoleConfig, error) {
	return c.AssumeRole, nil
}
+
+// LoadSharedConfigIgnoreNotExist is an alias for LoadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func LoadSharedConfigIgnoreNotExist(configs Configs) (Config, error) {
+ cfg, err := LoadSharedConfig(configs)
+ if err != nil {
+ if _, ok := err.(SharedConfigNotExistErrors); ok {
+ return SharedConfig{}, nil
+ }
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
// LoadSharedConfig uses the Configs passed in to load the SharedConfig from file
// The file names and profile name are sourced from the Configs.
//
// If profile name is not provided DefaultSharedConfigProfile (default) will
// be used.
//
// If shared config filenames are not provided DefaultSharedConfigFiles will
// be used.
//
// Config providers used:
// * SharedConfigProfileProvider
// * SharedConfigFilesProvider
func LoadSharedConfig(configs Configs) (Config, error) {
	var profile string
	var files []string
	var ok bool
	var err error

	// Profile: explicit from configs, else the package default.
	profile, ok, err = GetSharedConfigProfile(configs)
	if err != nil {
		return nil, err
	}
	if !ok {
		profile = DefaultSharedConfigProfile
	}

	// Files: explicit from configs, else the package default list.
	files, ok, err = GetSharedConfigFiles(configs)
	if err != nil {
		return nil, err
	}
	if !ok {
		files = DefaultSharedConfigFiles
	}

	return NewSharedConfig(profile, files)
}
+
// NewSharedConfig retrieves the configuration from the list of files
// using the profile provided. The order the files are listed will determine
// precedence. Values in subsequent files will overwrite values defined in
// earlier files.
//
// For example, given two files A and B. Both define credentials. If the order
// of the files are A then B, B's credential values will be used instead of A's.
func NewSharedConfig(profile string, filenames []string) (SharedConfig, error) {
	if len(filenames) == 0 {
		return SharedConfig{}, fmt.Errorf("no shared config files provided")
	}

	files, err := loadSharedConfigIniFiles(filenames)
	if err != nil {
		return SharedConfig{}, err
	}

	cfg := SharedConfig{}
	if err = cfg.setFromIniFiles(profile, files); err != nil {
		return SharedConfig{}, err
	}

	// If the profile chains to a source_profile, resolve that profile's
	// credentials as well.
	if len(cfg.AssumeRole.sourceProfile) > 0 {
		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
			return SharedConfig{}, err
		}
	}

	return cfg, nil
}
+
// sharedConfigFile pairs a config file path with its parsed INI sections.
type sharedConfigFile struct {
	Filename string
	IniData  ini.Sections
}

// loadSharedConfigIniFiles parses each named file into a sharedConfigFile.
// Unreadable files are skipped and collected; an error is returned only when
// a file is malformed, or when none of the files could be opened (in which
// case the aggregate SharedConfigNotExistErrors is returned).
func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
	files := make([]sharedConfigFile, 0, len(filenames))

	errs := SharedConfigNotExistErrors{}
	for _, filename := range filenames {
		sections, err := ini.OpenFile(filename)
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
			errs = append(errs,
				SharedConfigFileNotExistError{Filename: filename, Err: err},
			)
			// Skip files which can't be opened and read for whatever reason
			continue
		} else if err != nil {
			return nil, SharedConfigLoadError{Filename: filename, Err: err}
		}

		files = append(files, sharedConfigFile{
			Filename: filename, IniData: sections,
		})
	}

	if len(files) == 0 {
		return nil, errs
	}

	return files, nil
}
+
// setAssumeRoleSource resolves the source_profile named by c.AssumeRole and
// stores it in c.AssumeRole.Source. The source profile must provide static
// shared credentials or a SharedConfigAssumeRoleError is returned.
func (c *SharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
	var assumeRoleSrc SharedConfig

	// Multiple level assume role chains are not supported; when the source
	// profile is the profile itself, reuse c with its AssumeRole cleared.
	if c.AssumeRole.sourceProfile == origProfile {
		assumeRoleSrc = *c
		assumeRoleSrc.AssumeRole = AssumeRoleConfig{}
	} else {
		err := assumeRoleSrc.setFromIniFiles(c.AssumeRole.sourceProfile, files)
		if err != nil {
			return SharedConfigAssumeRoleError{
				Profile: c.Profile,
				RoleARN: c.AssumeRole.RoleARN,
				Err:     err,
			}
		}
	}

	if len(assumeRoleSrc.Credentials.AccessKeyID) == 0 {
		return SharedConfigAssumeRoleError{
			Profile: c.Profile,
			RoleARN: c.AssumeRole.RoleARN,
			Err:     fmt.Errorf("source profile has no shared credentials"),
		}
	}

	c.AssumeRole.Source = &assumeRoleSrc

	return nil
}
+
// setFromIniFiles applies the profile's values from each file in order, so
// later files overwrite earlier ones.
//
// Returns an error if all of the files fail to load. If at least one file is
// successfully loaded and contains the profile, no error will be returned.
func (c *SharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
	c.Profile = profile

	existErrs := SharedConfigNotExistErrors{}
	for _, f := range files {
		if err := c.setFromIniFile(profile, f); err != nil {
			// A missing profile in one file is tolerated; any other error is fatal.
			if _, ok := err.(SharedConfigProfileNotExistError); ok {
				existErrs = append(existErrs, err)
				continue
			}
			return err
		}
	}

	// Error only when the profile was absent from every file.
	if len(existErrs) == len(files) {
		return existErrs
	}

	return nil
}
+
// setFromIniFile loads the configuration from the file using
// the profile provided. A SharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
// for incomplete grouped values in the config. Such as credentials. For example
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (c *SharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
	section, ok := file.IniData.GetSection(profile)
	if !ok {
		// Fallback to the alternate section name form: "profile <name>"
		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
		if !ok {
			return SharedConfigProfileNotExistError{
				Filename: file.Filename,
				Profile:  profile,
				Err:      nil,
			}
		}
	}

	// Shared Credentials: both key ID and secret must be present for the
	// group to be applied.
	akid := section.String(accessKeyIDKey)
	secret := section.String(secretAccessKey)
	if len(akid) > 0 && len(secret) > 0 {
		c.Credentials = aws.Credentials{
			AccessKeyID:     akid,
			SecretAccessKey: secret,
			SessionToken:    section.String(sessionTokenKey),
			Source:          fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
		}
	}

	// Assume Role: both role_arn and source_profile must be present for the
	// group to be applied.
	roleArn := section.String(roleArnKey)
	srcProfile := section.String(sourceProfileKey)
	if len(roleArn) > 0 && len(srcProfile) > 0 {
		c.AssumeRole = AssumeRoleConfig{
			RoleARN:         roleArn,
			ExternalID:      section.String(externalIDKey),
			MFASerial:       section.String(mfaSerialKey),
			RoleSessionName: section.String(roleSessionNameKey),

			sourceProfile: srcProfile,
		}
	}

	// Region
	if v := section.String(regionKey); len(v) > 0 {
		c.Region = v
	}

	return nil
}
+
+// SharedConfigNotExistErrors provides an error type for failure to load shared
+// config because resources do not exist.
+type SharedConfigNotExistErrors []error
+
+func (es SharedConfigNotExistErrors) Error() string {
+ msg := "failed to load shared config\n"
+ for _, e := range es {
+ msg += "\t" + e.Error()
+ }
+ return msg
+}
+
+// SharedConfigLoadError is an error for the shared config file failed to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Cause is the underlying error that caused the failure.
+func (e SharedConfigLoadError) Cause() error {
+ return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+ return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigFileNotExistError is an error for the shared config when
+// the filename does not exist.
+type SharedConfigFileNotExistError struct {
+ Filename string
+ Profile string
+ Err error
+}
+
+// Cause is the underlying error that caused the failure.
+func (e SharedConfigFileNotExistError) Cause() error {
+ return e.Err
+}
+
+func (e SharedConfigFileNotExistError) Error() string {
+ return fmt.Sprintf("failed to open shared config file, %s, %v", e.Filename, e.Err)
+}
+
// SharedConfigProfileNotExistError is an error for the shared config when
// the profile was not found in the config file.
type SharedConfigProfileNotExistError struct {
	Filename string
	Profile  string
	Err      error
}

// Cause returns the underlying error, if any, that caused the failure.
func (e SharedConfigProfileNotExistError) Cause() error {
	return e.Err
}

// Error implements the error interface.
func (e SharedConfigProfileNotExistError) Error() string {
	return fmt.Sprintf("failed to get shared config profile, %s, in %s, %v", e.Profile, e.Filename, e.Err)
}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ Profile string
+ RoleARN string
+ Err error
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+ return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+ e.RoleARN, e.Profile, e.Err)
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go
new file mode 100644
index 0000000..be2b453
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/handlers.go
@@ -0,0 +1,256 @@
+package aws
+
+import (
+ "fmt"
+ "strings"
+)
+
// A Handlers provides a collection of request handlers for various
// stages of handling requests. Each field is an ordered HandlerList run at
// the corresponding phase of a request's lifecycle (see HandlerList.Run).
type Handlers struct {
	Validate         HandlerList
	Build            HandlerList
	Sign             HandlerList
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
	AfterRetry       HandlerList
	Complete         HandlerList
}
+
// Copy returns a copy of this handler's lists. Each list's backing slice is
// duplicated so pushes to the copy do not affect the original.
func (h *Handlers) Copy() Handlers {
	return Handlers{
		Validate:         h.Validate.copy(),
		Build:            h.Build.copy(),
		Sign:             h.Sign.copy(),
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
		AfterRetry:       h.AfterRetry.copy(),
		Complete:         h.Complete.copy(),
	}
}
+
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
	h.Validate.Clear()
	h.Build.Clear()
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
	h.Retry.Clear()
	h.AfterRetry.Clear()
	h.Complete.Clear()
}
+
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
	Index   int          // Position of the handler within the list.
	Handler NamedHandler // The handler that was just run.
	Request *Request     // The request the handler was run against.
}

// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
	list []NamedHandler

	// Called after each request handler in the list is called. If set
	// and the func returns true the HandlerList will continue to iterate
	// over the request handlers. If false is returned the HandlerList
	// will stop iterating.
	//
	// Should be used if extra logic to be performed between each handler
	// in the list. This can be used to terminate a list's iteration
	// based on a condition such as error like, HandlerListStopOnError.
	// Or for logging like HandlerListLogItem.
	AfterEachFn func(item HandlerListRunItem) bool
}

// A NamedHandler is a struct that contains a name and function callback.
// The name allows handlers to be swapped or removed by identity.
type NamedHandler struct {
	Name string
	Fn   func(*Request)
}
+
// copy creates a copy of the handler list, duplicating the backing slice so
// the copy and the original can grow independently.
func (l *HandlerList) copy() HandlerList {
	n := HandlerList{
		AfterEachFn: l.AfterEachFn,
	}
	if len(l.list) == 0 {
		return n
	}

	// Allocate a fresh slice and append the existing handlers into it.
	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
	return n
}

// Clear clears the handler list, retaining the backing array for reuse.
func (l *HandlerList) Clear() {
	l.list = l.list[0:0]
}

// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
	return len(l.list)
}

// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
	l.PushBackNamed(NamedHandler{"__anonymous", f})
}

// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
	// Pre-size the first allocation to avoid repeated growth for small lists.
	if cap(l.list) == 0 {
		l.list = make([]NamedHandler, 0, 5)
	}
	l.list = append(l.list, n)
}

// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
	l.PushFrontNamed(NamedHandler{"__anonymous", f})
}

// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
	if cap(l.list) == len(l.list) {
		// Allocating new list required
		l.list = append([]NamedHandler{n}, l.list...)
	} else {
		// Enough room to prepend into list: grow by one, shift everything
		// right (copy handles the overlap), then write n at the front.
		l.list = append(l.list, NamedHandler{})
		copy(l.list[1:], l.list)
		l.list[0] = n
	}
}

// Remove removes a NamedHandler n by its name.
func (l *HandlerList) Remove(n NamedHandler) {
	l.RemoveByName(n.Name)
}

// RemoveByName removes every NamedHandler whose name matches. The removal is
// done in place by shifting later elements left, avoiding a new allocation.
func (l *HandlerList) RemoveByName(name string) {
	for i := 0; i < len(l.list); i++ {
		m := l.list[i]
		if m.Name == name {
			// Shift array preventing creating new arrays
			copy(l.list[i:], l.list[i+1:])
			l.list[len(l.list)-1] = NamedHandler{}
			l.list = l.list[:len(l.list)-1]

			// decrement list so next check to length is correct
			i--
		}
	}
}
+
// SwapNamed will swap out any existing handlers with the same name as the
// passed in NamedHandler returning true if handlers were swapped. False is
// returned otherwise. All matching entries are replaced, not just the first.
func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
	for i := 0; i < len(l.list); i++ {
		if l.list[i].Name == n.Name {
			l.list[i].Fn = n.Fn
			swapped = true
		}
	}

	return swapped
}

// SetBackNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the end of the list.
func (l *HandlerList) SetBackNamed(n NamedHandler) {
	if !l.SwapNamed(n) {
		l.PushBackNamed(n)
	}
}

// SetFrontNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the beginning of
// the list.
func (l *HandlerList) SetFrontNamed(n NamedHandler) {
	if !l.SwapNamed(n) {
		l.PushFrontNamed(n)
	}
}
+
// Run executes all handlers in the list with a given request object.
// If AfterEachFn is set it is called after every handler; iteration stops
// early when it returns false.
func (l *HandlerList) Run(r *Request) {
	for i, h := range l.list {
		h.Fn(r)
		item := HandlerListRunItem{
			Index: i, Handler: h, Request: r,
		}
		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
			return
		}
	}
}

// HandlerListLogItem logs the request handler and the state of the
// request's Error value. Always returns true to continue iterating
// request handlers in a HandlerList. No-op when no Logger is configured.
func HandlerListLogItem(item HandlerListRunItem) bool {
	if item.Request.Config.Logger == nil {
		return true
	}
	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
		item.Index, item.Handler.Name, item.Request.Error)

	return true
}

// HandlerListStopOnError returns false to stop the HandlerList iterating
// over request handlers if Request.Error is not nil. True otherwise
// to continue iterating.
func HandlerListStopOnError(item HandlerListRunItem) bool {
	return item.Request.Error == nil
}
+
// WithAppendUserAgent will add a string to the user agent prefixed with a
// single white space. The returned Option registers a Build handler on the
// request that appends s to the User-Agent header.
func WithAppendUserAgent(s string) Option {
	return func(r *Request) {
		r.Handlers.Build.PushBack(func(r2 *Request) {
			AddToUserAgent(r, s)
		})
	}
}

// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
// "name/version (extra0; extra1; ...)"
// The user agent part will be concatenated with this current request's user agent string.
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
	// The user-agent fragment is built once, outside the returned closure.
	ua := fmt.Sprintf("%s/%s", name, version)
	if len(extra) > 0 {
		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
	}
	return func(r *Request) {
		AddToUserAgent(r, ua)
	}
}

// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
	return func(r *Request) {
		AddToUserAgent(r, s)
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go
new file mode 100644
index 0000000..40dfe40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/http_request.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go
new file mode 100644
index 0000000..91a6f27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go
new file mode 100644
index 0000000..189e520
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logger.go
@@ -0,0 +1,97 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
// A LogLevel defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevel uint

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. The zero value (LogOff) matches only LogOff.
func (l LogLevel) Matches(v LogLevel) bool {
	return l&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
// The zero value (LogOff) satisfies only LogOff.
func (l LogLevel) AtLeast(v LogLevel) bool {
	return l >= v
}
+
const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be used to disable all logging.
	LogOff LogLevel = iota * 0x1000

	// LogDebug state that debug output should be logged by the SDK. This should
	// be used to inspect request made and responses received.
	LogDebug
)

// Debug Logging Sub Levels. Each sub level sets a distinct bit (1 << iota)
// OR'd with LogDebug, so enabling any sub level also enables LogDebug.
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevel = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodies in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK
	// Will also enable LogDebug.
	LogDebugWithHTTPBody

	// LogDebugWithRequestRetries states the SDK should log when service requests will
	// be retried. This should be used to log when you want to log when service
	// requests are being retried. Will also enable LogDebug.
	LogDebugWithRequestRetries

	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors
)
+
// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
//
//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
//		fmt.Fprintln(os.Stdout, args...)
//	})})
type LoggerFunc func(...interface{})

// Log calls the wrapped function with the arguments provided.
func (f LoggerFunc) Log(args ...interface{}) {
	f(args...)
}
+
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use same formatting runes as the stdlib log.Logger
//
// TODO remove, moved to default pkg
func NewDefaultLogger() Logger {
	return &defaultLogger{
		logger: log.New(os.Stdout, "", log.LstdFlags),
	}
}

// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
	logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go
new file mode 100644
index 0000000..cb4614c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/offset_reader.go
@@ -0,0 +1,58 @@
+package aws
+
+import (
+ "io"
+ "sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, 0)
+
+ reader.buf = buf
+ return reader
+}
+
// Close will close the instance of the offset reader's access to
// the underlying io.ReadSeeker. The underlying reader itself is not closed;
// only this wrapper is marked closed so later Reads return io.EOF.
func (o *offsetReader) Close() error {
	o.lock.Lock()
	defer o.lock.Unlock()
	o.closed = true
	return nil
}

// Read is a thread-safe read of the underlying io.ReadSeeker.
// Returns io.EOF once the wrapper has been closed.
func (o *offsetReader) Read(p []byte) (int, error) {
	o.lock.Lock()
	defer o.lock.Unlock()

	if o.closed {
		return 0, io.EOF
	}

	return o.buf.Read(p)
}

// Seek is a thread-safe seeking operation.
func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
	o.lock.Lock()
	defer o.lock.Unlock()

	return o.buf.Seek(offset, whence)
}

// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer. The new reader shares the same underlying
// io.ReadSeeker, repositioned at offset. Close always returns nil, so its
// error is safely ignored here.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
	o.Close()
	return newOffsetReader(o.buf, offset)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 0000000..b180eb3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,587 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeRequestCanceled is the error code that will be returned by an
+ // API request that was canceled. Requests given a Context may
+ // return this error when canceled.
+ ErrCodeRequestCanceled = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config Config
+ Metadata Metadata
+ Handlers Handlers
+
+ Retryer
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ context Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wrap the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg Config, metadata Metadata, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	// TODO improve this experience for config copy?
+ cfg = cfg.Copy()
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ // TODO need better way of handling this error... NewRequest should return error.
+ endpoint, err := cfg.EndpointResolver.ResolveEndpoint(metadata.ServiceName, cfg.Region)
+ if err == nil {
+ // TODO so ugly
+ metadata.Endpoint = endpoint.URL
+ if len(endpoint.SigningName) > 0 && !endpoint.SigningNameDerived {
+ metadata.SigningName = endpoint.SigningName
+ }
+ if len(endpoint.SigningRegion) > 0 {
+ metadata.SigningRegion = endpoint.SigningRegion
+ }
+
+ httpReq.URL, err = url.Parse(endpoint.URL + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+ }
+
+ r := &Request{
+ Config: cfg,
+ Metadata: metadata,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(LogDebugWithHTTPBody)
+func WithLogLevel(l LogLevel) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = l
+ }
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context BackgroundContext will be returned.
+func (r *Request) Context() Context {
+ if r.context != nil {
+ return r.context
+ }
+ return BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// a in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = false
+
+ if r.Operation.BeforePresignFn != nil {
+ r = r.copy()
+ err := r.Operation.BeforePresignFn(r)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like presign, with the addition of returning a
+// set of headers that were signed.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+ r.ExpireTime = expireTime
+ r.Sign()
+ if r.Error != nil {
+ return "", nil, r.Error
+ }
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.Metadata.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Send will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := computeBodyLength(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ }
+
+ var body io.ReadCloser
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if a ReaderSeekerCloser was used with
+ // a io.Reader that was not also an io.Seeker.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// Attempts to compute the length of the body of the reader using the
+// io.Seeker interface. If the value is not seekable because of being
+// a ReaderSeekerCloser without an underlying Seeker -1 will be returned.
+// If no error occurs the length of the body will be returned.
+func computeBodyLength(r io.ReadSeeker) (int64, error) {
+ seekable := true
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that a io.Readers might not actually be seekable.
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ case *ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ }
+ if !seekable {
+ return -1, nil
+ }
+
+ curOffset, err := r.Seek(0, 1)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := r.Seek(0, 2)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = r.Seek(curOffset, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ for {
+ if BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.Metadata.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ if !shouldRetryCancel(r) {
+ return r.Error
+ }
+
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ err := r.Error
+
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+func shouldRetryCancel(r *Request) bool {
+ awsErr, ok := r.Error.(awserr.Error)
+ timeoutErr := false
+ errStr := r.Error.Error()
+ if ok {
+ if awsErr.Code() == ErrCodeRequestCanceled {
+ return false
+ }
+ err := awsErr.OrigErr()
+ netErr, netOK := err.(net.Error)
+ timeoutErr = netOK && netErr.Temporary()
+ if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+ errStr = urlErr.Err.Error()
+ }
+ }
+
+ // There can be two types of canceled errors here.
+ // The first being a net.Error and the other being an error.
+ // If the request was timed out, we want to continue the retry
+ // process. Otherwise, return the canceled error.
+ return timeoutErr ||
+ (errStr != "net/http: request canceled" &&
+ errStr != "net/http: request canceled while waiting for connection")
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go
new file mode 100644
index 0000000..6db88c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package aws
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go
new file mode 100644
index 0000000..ad81c3d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package aws
+
+import (
+ "net/http"
+)
+
+// NoBody is a http.NoBody reader instructing Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go
new file mode 100644
index 0000000..19fa4fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package aws
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go
new file mode 100644
index 0000000..0b333d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_context_1_6.go
@@ -0,0 +1,12 @@
+// +build !go1.7
+
+package aws
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go
new file mode 100644
index 0000000..15594c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request_pagination.go
@@ -0,0 +1,187 @@
+package aws
+
+import (
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// A Pager provides paginating of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pager differs from a Paginator type in that Pager is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// for p.Next() {
+// data := p.CurrentPage().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pager type.
+type Pager struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+
+ started bool
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// hasNextPage will return true if Pager is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pager) hasNextPage() bool {
+ return !(p.started && len(p.nextTokens) == 0)
+}
+
+// Err returns the error Pager encountered when retrieving the next page.
+func (p *Pager) Err() error {
+ return p.err
+}
+
+// CurrentPage returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pager) CurrentPage() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Page returns false.
+func (p *Pager) Next() bool {
+ if !p.hasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pager type is what provides iterating between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if v == false {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time its used.
+func logDeprecatedf(logger Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go
new file mode 100644
index 0000000..ea7717b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/response.go
@@ -0,0 +1,8 @@
+package aws
+
+// Response provides the response meta data for a SDK API request's response.
+type Response struct {
+ // TODO these fields should be focused on response, not just embedded request value.
+ // Need refactor of request for this to be better.
+ Request *Request
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 0000000..a71b969
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,160 @@
+package aws
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *Config, retryer Retryer) *Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000..244c86d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
// rules is an ordered collection of rule values; a string value is
// considered valid when any one rule in the collection accepts it.
type rules []rule

// rule is the interface implemented by every single validation check.
type rule interface {
	IsValid(value string) bool
}

// IsValid reports whether at least one rule in the collection accepts
// the value. Nested rule sets are supported, since rules itself
// satisfies the rule interface.
func (r rules) IsValid(value string) bool {
	for _, rr := range r {
		if rr.IsValid(value) {
			return true
		}
	}
	return false
}

// mapRule is a set-membership rule backed by a map keyed on the value.
type mapRule map[string]struct{}

// IsValid reports whether the value is a member of the set.
func (m mapRule) IsValid(value string) bool {
	_, found := m[value]
	return found
}
+
// whitelist is a generic rule for whitelisting
type whitelist struct {
	rule
}

// IsValid for whitelist checks if the value is within the whitelist
func (w whitelist) IsValid(value string) bool {
	return w.rule.IsValid(value)
}

// blacklist is a generic rule for blacklisting
type blacklist struct {
	rule
}

// IsValid for blacklist checks that the value is NOT matched by the
// embedded rule (the inverse of whitelist).
func (b blacklist) IsValid(value string) bool {
	return !b.rule.IsValid(value)
}
+
// patterns is a list of canonical-header-key prefixes.
type patterns []string

// IsValid reports whether the canonicalized form of value starts with
// any of the configured prefixes.
func (p patterns) IsValid(value string) bool {
	canonical := http.CanonicalHeaderKey(value)
	for _, prefix := range p {
		if strings.HasPrefix(canonical, prefix) {
			return true
		}
	}
	return false
}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go
new file mode 100644
index 0000000..6aa2ed2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
// WithUnsignedPayload is a functional option for NewSigner that sets the
// Signer's UnsignedPayload field to true, disabling payload signing.
func WithUnsignedPayload(v4 *Signer) {
	v4.UnsignedPayload = true
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000..bd082e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 0000000..ddbade6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,755 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for request that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "///"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and using the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fallback to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of these escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK that explicitly escaping the request prior to being signed is preferable,
+// and will help prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist used when building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers eligible to be hoisted into
+// the query string when presigning: X-Amz-* headers that are not themselves
+// required signed headers.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
	// The authentication credentials the request will be signed against.
	// This value must be set to sign requests.
	Credentials aws.CredentialsProvider

	// Sets the log level the signer should use when reporting information to
	// the logger. If the logger is nil nothing will be logged. See
	// aws.LogLevel for more information on available logging levels
	//
	// By default nothing will be logged.
	Debug aws.LogLevel

	// The logger signing information will be written to. If the logger
	// is nil, nothing will be logged.
	Logger aws.Logger

	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
	// request header to the request's query string. This is most commonly used
	// with pre-signed requests preventing headers from being added to the
	// request's query string.
	DisableHeaderHoisting bool

	// Disables the automatic escaping of the URI path of the request for the
	// signature's canonical string's path. For services that do not need additional
	// escaping then use this to disable the signer escaping the path.
	//
	// S3 is an example of a service that does not need additional escaping.
	//
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	DisableURIPathEscaping bool

	// Disables the automatic setting of the HTTP request's Body field with the
	// io.ReadSeeker passed in to the signer. This is useful if you're using a
	// custom wrapper around the body for the io.ReadSeeker and want to preserve
	// the Body value on the Request.Body.
	//
	// This does run the risk of signing a request with a body that will not be
	// sent in the request. Need to ensure that the underlying data of the Body
	// values are the same.
	DisableRequestBodyOverwrite bool

	// UnsignedPayload will prevent signing of the payload. This will only
	// work for services that have support for this.
	UnsignedPayload bool
}
+
// NewSigner returns a Signer pointer configured with the credentials and optional
// option values provided. If no options are provided the Signer will use its
// default configuration.
func NewSigner(credsProvider aws.CredentialsProvider, options ...func(*Signer)) *Signer {
	v4 := &Signer{
		Credentials: credsProvider,
	}

	// Apply caller-supplied functional options (e.g. WithUnsignedPayload).
	for _, option := range options {
		option(v4)
	}

	return v4
}
+
// signingCtx carries the per-request state used to produce a V4 signature:
// the request and body being signed, timing, retrieved credentials, and the
// intermediate strings built up during canonicalization.
type signingCtx struct {
	ServiceName      string
	Region           string
	Request          *http.Request
	Body             io.ReadSeeker
	Query            url.Values
	Time             time.Time
	ExpireTime       time.Duration
	SignedHeaderVals http.Header

	DisableURIPathEscaping bool

	credValues         aws.Credentials
	isPresign          bool
	formattedTime      string
	formattedShortTime string
	unsignedPayload    bool

	bodyDigest       string
	signedHeaders    string
	canonicalHeaders string
	canonicalString  string
	credentialString string
	stringToSign     string
	signature        string
	authorization    string
}
+
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. Generally for signed requests this value
// is not needed as the full request context will be captured by the http.Request
// value. It is included for reference though.
//
// Sign will set the request's Body to be the `body` parameter passed in. If
// the body is not already an io.ReadCloser, it will be wrapped within one. If
// a `nil` body parameter passed to Sign, the request's Body field will be
// also set to nil. It's important to note that this functionality will not
// change the request's ContentLength of the request.
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way the header values on the request
// will not be lost.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
	// An expiry of 0 selects header-based (non-presigned) signing.
	return v4.signWithBody(r, body, service, region, 0, signTime)
}
+
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. For presigned requests these headers
// and their values must be included on the HTTP request when it is made. This
// is helpful to know what header values need to be shared with the party the
// presigned request will be distributed to.
//
// Presign differs from Sign in that it will sign the request using query string
// instead of header values. This allows you to share the Presigned Request's
// URL with third parties, or distribute it throughout your system with minimal
// dependencies.
//
// Presign also takes an exp value which is the duration the
// signed request will be valid after the signing time. This allows you to
// set when the request will expire.
//
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
//
// Presigning a S3 request will not compute the body's SHA256 hash by default.
// This is done due to the general use case for S3 presigned URLs is to share
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and that will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
	return v4.signWithBody(r, body, service, region, exp, signTime)
}
+
// signWithBody implements both Sign and Presign. A non-zero exp marks the
// request as presigned (signature carried in the query string); exp == 0
// signs via the Authorization header.
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
	ctx := &signingCtx{
		Request:                r,
		Body:                   body,
		Query:                  r.URL.Query(),
		Time:                   signTime,
		ExpireTime:             exp,
		isPresign:              exp != 0,
		ServiceName:            service,
		Region:                 region,
		DisableURIPathEscaping: v4.DisableURIPathEscaping,
		unsignedPayload:        v4.UnsignedPayload,
	}

	// Sort each query parameter's values so the canonical query string is
	// deterministic.
	for key := range ctx.Query {
		sort.Strings(ctx.Query[key])
	}

	if ctx.isRequestSigned() {
		// Re-signing an already-signed request: refresh the signing time
		// and strip the previous presigned query parameters.
		ctx.Time = sdk.NowTime()
		ctx.handlePresignRemoval()
	}

	var err error
	ctx.credValues, err = v4.Credentials.Retrieve()
	if err != nil {
		return http.Header{}, err
	}

	ctx.assignAmzQueryValues()
	ctx.build(v4.DisableHeaderHoisting)

	// If the request is not presigned the body should be attached to it. This
	// prevents the confusion of wanting to send a signed request without
	// the body the request was signed for attached.
	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
		var reader io.ReadCloser
		if body != nil {
			var ok bool
			if reader, ok = body.(io.ReadCloser); !ok {
				reader = ioutil.NopCloser(body)
			}
		}
		r.Body = reader
	}

	if v4.Debug.Matches(aws.LogDebugWithSigning) {
		v4.logSigningInfo(ctx)
	}

	return ctx.SignedHeaderVals, nil
}
+
// handlePresignRemoval strips a previous presigned signature's query
// parameters from the request so it can be signed again cleanly.
func (ctx *signingCtx) handlePresignRemoval() {
	if !ctx.isPresign {
		return
	}

	// The previous presigned values are now invalid and must be removed
	// before re-signing, or the request will fail.
	ctx.removePresign()

	// Update the request's query string to ensure the values stay in
	// sync in the case retrieving the new credentials fails.
	ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
+
// assignAmzQueryValues stages the algorithm and session-token values on the
// request: as query-string parameters when presigning, otherwise as the
// X-Amz-Security-Token header.
func (ctx *signingCtx) assignAmzQueryValues() {
	if ctx.isPresign {
		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
		if ctx.credValues.SessionToken != "" {
			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
		} else {
			// Drop any stale token from a previous signing.
			ctx.Query.Del("X-Amz-Security-Token")
		}

		return
	}

	if ctx.credValues.SessionToken != "" {
		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
	}
}
+
// SignRequestHandler is a named request handler the SDK will use to sign
// service client request with using the V4 signature.
var SignRequestHandler = aws.NamedHandler{
	Name: "v4.SignRequestHandler", Fn: func(r *aws.Request) { SignSDKRequest(r) },
}

// BuildNamedHandler will build a generic handler for signing, applying the
// provided Signer options to each request's signer.
func BuildNamedHandler(name string, opts ...func(*Signer)) aws.NamedHandler {
	return aws.NamedHandler{
		Name: name,
		Fn: func(req *aws.Request) {
			SignSDKRequest(req, opts...)
		},
	}
}
+
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// aws.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *aws.Request, opts ...func(*Signer)) {
	// If the request does not need to be signed ignore the signing of the
	// request if the AnonymousCredentials object is used.
	if req.Config.Credentials == aws.AnonymousCredentials {
		return
	}

	region := req.Metadata.SigningRegion
	if region == "" {
		region = req.Config.Region
	}

	name := req.Metadata.SigningName
	if name == "" {
		name = req.Metadata.ServiceName
	}

	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
		v4.Debug = req.Config.LogLevel
		v4.Logger = req.Config.Logger
		v4.DisableHeaderHoisting = req.NotHoist
		if name == "s3" {
			// S3 service should not have any escaping applied
			v4.DisableURIPathEscaping = true
		}
		// Prevents setting the HTTPRequest's Body. Since the Body could be
		// wrapped in a custom io.Closer that we do not want to be stomped
		// on top of by the signer.
		v4.DisableRequestBodyOverwrite = true
	})

	for _, opt := range opts {
		opt(v4)
	}

	// Reuse the time of the previous signing when set — presumably so a
	// re-signed (retried) request keeps a consistent signature time; confirm
	// against callers that populate LastSignedAt.
	signingTime := req.Time
	if !req.LastSignedAt.IsZero() {
		signingTime = req.LastSignedAt
	}

	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
		name, region, req.ExpireTime, signingTime,
	)
	if err != nil {
		req.Error = err
		req.SignedHeaderVals = nil
		return
	}

	req.SignedHeaderVals = signedHeaders
	req.LastSignedAt = sdk.NowTime()
}
+
// logSignInfoMsg is the debug template dumping the canonical string and
// string to sign of a request signature.
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
// logSignedURLMsg is appended for presigned requests to show the final URL.
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`

// logSigningInfo writes the signing debug output to the Signer's Logger.
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
	signedURLMsg := ""
	if ctx.isPresign {
		signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
	}
	msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
	v4.Logger.Log(msg)
}
+
// build assembles the signature in dependency order and attaches the result
// to the request: as query-string parameters for presigned requests,
// otherwise as the Authorization header.
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
	ctx.buildTime()             // no depends
	ctx.buildCredentialString() // no depends

	ctx.buildBodyDigest()

	unsignedHeaders := ctx.Request.Header
	if ctx.isPresign {
		if !disableHeaderHoisting {
			// Hoist eligible X-Amz-* headers into the query string so the
			// presigned URL is self-contained.
			urlValues := url.Values{}
			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
			for k := range urlValues {
				ctx.Query[k] = urlValues[k]
			}
		}
	}

	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
	ctx.buildCanonicalString() // depends on canon headers / signed headers
	ctx.buildStringToSign()    // depends on canon string
	ctx.buildSignature()       // depends on string to sign

	if ctx.isPresign {
		ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
	} else {
		parts := []string{
			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
			"SignedHeaders=" + ctx.signedHeaders,
			"Signature=" + ctx.signature,
		}
		ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
	}
}
+
// buildTime formats the signing time and records it on the request: as
// X-Amz-Date and X-Amz-Expires query parameters when presigning, otherwise
// as the X-Amz-Date header.
func (ctx *signingCtx) buildTime() {
	ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
	ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)

	if ctx.isPresign {
		// Expiry is expressed in whole seconds.
		duration := int64(ctx.ExpireTime / time.Second)
		ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
		ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
	} else {
		ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
	}
}
+
// buildCredentialString builds the credential scope string
// "<date>/<region>/<service>/aws4_request" and, when presigning, exposes the
// full credential (access key ID + scope) as X-Amz-Credential.
func (ctx *signingCtx) buildCredentialString() {
	ctx.credentialString = strings.Join([]string{
		ctx.formattedShortTime,
		ctx.Region,
		ctx.ServiceName,
		"aws4_request",
	}, "/")

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
	}
}
+
// buildQuery splits header into two groups: headers accepted by rule r are
// returned as query values (to be hoisted into the query string), the rest
// are returned as headers left unsigned by hoisting.
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
	query := url.Values{}
	unsignedHeaders := http.Header{}
	for k, h := range header {
		if r.IsValid(k) {
			query[k] = h
		} else {
			unsignedHeaders[k] = h
		}
	}

	return query, unsignedHeaders
}
// buildCanonicalHeaders selects the headers to sign (those accepted by rule
// r), records their values in SignedHeaderVals, and builds the sorted,
// lowercased "key:value" canonical header block and the signed-headers list.
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
	var headers []string
	// "host" is always signed, sourced from the request itself rather than
	// the header map.
	headers = append(headers, "host")
	for k, v := range header {
		canonicalKey := http.CanonicalHeaderKey(k)
		if !r.IsValid(canonicalKey) {
			continue // ignored header
		}
		if ctx.SignedHeaderVals == nil {
			ctx.SignedHeaderVals = make(http.Header)
		}

		lowerCaseKey := strings.ToLower(k)
		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
			// include additional values
			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
			continue
		}

		// NOTE(review): a caller-supplied "Host" header passing rule r would
		// be appended here in addition to the explicit "host" entry above,
		// duplicating it in the signed-headers list — confirm callers never
		// set Host directly on Request.Header.
		headers = append(headers, lowerCaseKey)
		ctx.SignedHeaderVals[lowerCaseKey] = v
	}
	sort.Strings(headers)

	ctx.signedHeaders = strings.Join(headers, ";")

	if ctx.isPresign {
		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
	}

	headerValues := make([]string, len(headers))
	for i, k := range headers {
		if k == "host" {
			// Prefer the explicit Request.Host override, falling back to
			// the URL's host.
			if ctx.Request.Host != "" {
				headerValues[i] = "host:" + ctx.Request.Host
			} else {
				headerValues[i] = "host:" + ctx.Request.URL.Host
			}
		} else {
			headerValues[i] = k + ":" +
				strings.Join(ctx.SignedHeaderVals[k], ",")
		}
	}
	stripExcessSpaces(headerValues)
	ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
+
// buildCanonicalString joins the method, URI path, query string, canonical
// headers, signed-header list, and body digest into the canonical request.
func (ctx *signingCtx) buildCanonicalString() {
	// Re-encode the query so spaces are "%20" rather than "+", matching the
	// canonical form used by signature validation.
	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)

	uri := getURIPath(ctx.Request.URL)

	if !ctx.DisableURIPathEscaping {
		uri = rest.EscapePath(uri, false)
	}

	ctx.canonicalString = strings.Join([]string{
		ctx.Request.Method,
		uri,
		ctx.Request.URL.RawQuery,
		ctx.canonicalHeaders + "\n",
		ctx.signedHeaders,
		ctx.bodyDigest,
	}, "\n")
}
+
// buildStringToSign composes the final string to sign: algorithm, timestamp,
// credential scope, and the hex-encoded SHA-256 of the canonical request.
func (ctx *signingCtx) buildStringToSign() {
	ctx.stringToSign = strings.Join([]string{
		authHeaderPrefix,
		ctx.formattedTime,
		ctx.credentialString,
		hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
	}, "\n")
}
+
// buildSignature derives the signing key by chaining HMAC-SHA256 over the
// short date, region, service name, and the literal "aws4_request", then
// signs the string to sign with that key, storing the hex signature.
func (ctx *signingCtx) buildSignature() {
	secret := ctx.credValues.SecretAccessKey
	date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
	region := makeHmac(date, []byte(ctx.Region))
	service := makeHmac(region, []byte(ctx.ServiceName))
	credentials := makeHmac(service, []byte("aws4_request"))
	signature := makeHmac(credentials, []byte(ctx.stringToSign))
	ctx.signature = hex.EncodeToString(signature)
}
+
// buildBodyDigest computes (or passes through) the payload hash used in the
// canonical request. A caller-supplied X-Amz-Content-Sha256 header takes
// precedence over computing the hash here.
func (ctx *signingCtx) buildBodyDigest() {
	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
	if hash == "" {
		// s3 and glacier also require the hash to be sent as a header.
		includeSHA256Header := ctx.unsignedPayload ||
			ctx.ServiceName == "s3" ||
			ctx.ServiceName == "glacier"

		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"

		if ctx.unsignedPayload || s3Presign {
			// Payload signing disabled, or presigning for S3: use the
			// UNSIGNED-PAYLOAD sentinel instead of a real digest.
			hash = "UNSIGNED-PAYLOAD"
			includeSHA256Header = !s3Presign
		} else if ctx.Body == nil {
			hash = emptyStringSHA256
		} else {
			hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
		}

		if includeSHA256Header {
			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
		}
	}
	ctx.bodyDigest = hash
}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// unsign removes signing flags for both signed and presigned requests.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
// makeSha256Reader returns the SHA-256 digest of the reader's remaining
// contents, restoring the reader's position before returning.
//
// NOTE(review): errors from Seek and io.Copy are silently discarded; a read
// failure would yield the digest of partial data — confirm callers accept
// this best-effort behavior.
func makeSha256Reader(reader io.ReadSeeker) []byte {
	hash := sha256.New()
	// Remember the current offset so the body can still be read when sent.
	start, _ := reader.Seek(0, 1)
	defer reader.Seek(start, 0)

	io.Copy(hash, reader)
	return hash.Sum(nil)
}
+
// doubleSpace is the search needle marking a run of two or more spaces.
const doubleSpace = "  "

// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain multiple side-by-side spaces.
func stripExcessSpaces(vals []string) {
	var j, k, l, m, spaces int
	for i, str := range vals {
		// Trim trailing spaces
		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
		}

		// Trim leading spaces
		for k = 0; k < j && str[k] == ' '; k++ {
		}
		str = str[k : j+1]

		// Strip multiple spaces.
		j = strings.Index(str, doubleSpace)
		if j < 0 {
			vals[i] = str
			continue
		}

		// Compact runs of spaces in place by copying bytes forward,
		// emitting only the first space of each run. `spaces` is shared
		// across iterations but is always zero on entry: trimmed strings
		// never end in a space, so the previous pass left it reset.
		buf := []byte(str)
		for k, m, l = j, j, len(buf); k < l; k++ {
			if buf[k] == ' ' {
				if spaces == 0 {
					// First space.
					buf[m] = buf[k]
					m++
				}
				spaces++
			} else {
				// End of multiple spaces.
				spaces = 0
				buf[m] = buf[k]
				m++
			}
		}

		vals[i] = string(buf[:m])
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go
new file mode 100644
index 0000000..0870016
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/static_provider.go
@@ -0,0 +1,52 @@
+package aws
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// StaticCredentialsProviderName provides a name of Static provider
+const StaticCredentialsProviderName = "StaticCredentialsProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
// A StaticCredentialsProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticCredentialsProvider struct {
	// Value holds the fixed credentials returned by Retrieve.
	Value Credentials
}
+
// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized
// with the AWS access key ID, secret access key, and session token passed in.
func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
	return StaticCredentialsProvider{
		Value: Credentials{
			AccessKeyID:     key,
			SecretAccessKey: secret,
			SessionToken:    session,
		},
	}
}
+
// Retrieve returns the credentials or error if the credentials are invalid.
//
// An empty access key ID or secret access key yields ErrStaticCredentialsEmpty.
// The returned credentials' Source defaults to StaticCredentialsProviderName
// when not already set.
func (s StaticCredentialsProvider) Retrieve() (Credentials, error) {
	v := s.Value
	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
		return Credentials{Source: StaticCredentialsProviderName}, ErrStaticCredentialsEmpty
	}

	if len(v.Source) == 0 {
		v.Source = StaticCredentialsProviderName
	}

	return v, nil
}
+
// IsExpired returns if the credentials are expired.
//
// For StaticCredentialsProvider, the credentials never expire.
func (s StaticCredentialsProvider) IsExpired() bool {
	return false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go
new file mode 100644
index 0000000..549a2c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/stscreds/provider.go
@@ -0,0 +1,264 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API.
+ cfg, err := external.LoadDefaultAWSConfig()
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ stsSvc := sts.New(cfg)
+ stsCredProvider := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+
+ cfg.Credentials = aws.NewCredentials(stsCredProvider)
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(cfg)
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRoleRequest(input *sts.AssumeRoleInput) sts.AssumeRoleRequest
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ aws.SafeCredentialsProvider
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called when ever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider is set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoler, roleARN string) *AssumeRoleProvider {
+ p := &AssumeRoleProvider{
+ Client: client,
+ RoleARN: roleARN,
+ }
+ p.RetrieveFn = p.retrieveFn
+
+ return p
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) retrieveFn() (aws.Credentials, error) {
+ // Apply defaults where parameters are not set.
+ if len(p.RoleSessionName) == 0 {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return aws.Credentials{},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ req := p.Client.AssumeRoleRequest(input)
+ resp, err := req.Send()
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ return aws.Credentials{
+ AccessKeyID: *resp.Credentials.AccessKeyId,
+ SecretAccessKey: *resp.Credentials.SecretAccessKey,
+ SessionToken: *resp.Credentials.SessionToken,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: resp.Credentials.Expiration.Add(-p.ExpiryWindow),
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go
new file mode 100644
index 0000000..d8b5124
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package aws
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level error
+// to a ErrCodeResponseTimeout, if its child is that.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurred, we will return the
+// ErrCodeResponseTimeout.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second)
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 0000000..0e2d864
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
+// only be used with an io.Reader that is also an io.Seeker. Doing so may
+// cause request signature errors, or request bodies not being sent for GET, HEAD
+// and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided
+// The number of bytes written will be returned, or an error. Can overwrite previously
+// written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go
new file mode 100644
index 0000000..6192b24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go
new file mode 100644
index 0000000..0210d27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+
+}
+
+// stripPort is a copy of Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go
new file mode 100644
index 0000000..6529ba6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/validation.go
@@ -0,0 +1,234 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil, Implemented for awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: fmt.Sprintf("missing required field"),
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
new file mode 100644
index 0000000..1b33476
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "2.0.0-preview.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go
new file mode 100644
index 0000000..6e3f7e7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/waiter.go
@@ -0,0 +1,284 @@
+package aws
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns the maximum number of times the waiter should
+// attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+//
+// Attempt is the number of attempts the Waiter has made checking the resource
+// state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors,
+// or the max attempts expires.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx Context) error {
+
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ if err := sdk.SleepWithContext(ctx, w.Delay(attempt)); err != nil {
+ return awserr.New(ErrCodeRequestCanceled, "waiter context canceled", err)
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
new file mode 100644
index 0000000..938cd14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
@@ -0,0 +1,112 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of a object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ if dst.Kind() == reflect.String {
+ dst.SetString(e.String())
+ } else {
+ dst.Set(reflect.New(e))
+ }
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if dst.Kind() != reflect.String && src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
new file mode 100644
index 0000000..bcfe51a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
@@ -0,0 +1,33 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type, they are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ // Special casing for strings as typed enumerations are string aliases
+ // but are not deep equal.
+ if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
+ return ra.String() == rb.String()
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
new file mode 100644
index 0000000..7e69bd5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
@@ -0,0 +1,225 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ if dstVal.Kind() == reflect.String {
+ dstVal.SetString(srcVal.String())
+ } else {
+ dstVal.Set(srcVal)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
new file mode 100644
index 0000000..710eb43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, " len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
new file mode 100644
index 0000000..b6432f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ stringValue(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
new file mode 100644
index 0000000..e83a998
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// AST interface allows us to determine what kind of node we
+// are on, and casting may not be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
new file mode 100644
index 0000000..0895d53
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
new file mode 100644
index 0000000..8d03fd3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
@@ -0,0 +1,39 @@
+package ini
+
+// isComment will return whether or not the next byte(s) is a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ case '/':
+ if len(b) > 1 {
+ return b[1] == '/'
+ }
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many bytes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
new file mode 100644
index 0000000..445b503
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
@@ -0,0 +1,30 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment' | / comment_slash
+// comment_slash -> / comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
new file mode 100644
index 0000000..04345a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used to satisfy the Token interface
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
new file mode 100644
index 0000000..91ba2a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return a LHS value in the equal expr
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
new file mode 100644
index 0000000..8d462f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
new file mode 100644
index 0000000..af6f397
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
new file mode 100644
index 0000000..898ebb0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+const (
+ // ErrCodeUnableToReadFile is used when a file is failed to be
+ // opened or read from.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various different tokens types
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token indicates metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw returns the raw runes that were consumed.
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
new file mode 100644
index 0000000..3bf4390
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
@@ -0,0 +1,312 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment' | / comment_slash
+ // comment_slash -> / comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: map[TokenType]int{
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: map[TokenType]int{
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+ // Finished parsing. Push what should be the last
+ // statement to the stack. If there is anything left
+ // on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+ // assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ k.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, ' ')
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+ return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
new file mode 100644
index 0000000..61754c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
@@ -0,0 +1,317 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
// isLitValue reports whether have begins with exactly the runes of
// want. Extra trailing runes in have are ignored.
func isLitValue(want, have []rune) bool {
	if len(want) > len(have) {
		return false
	}
	for i, r := range want {
		if have[i] != r {
			return false
		}
	}
	return true
}
+
// isNumberValue will return whether not the leading characters in
// a byte slice is a number. A number is delimited by whitespace or
// the newline token.
//
// A number is defined to be in a binary, octal, decimal (int | float), hex format,
// or in scientific notation.
func isNumberValue(b []rune) bool {
	negativeIndex := 0
	helper := numberHelper{}
	// needDigit is set whenever the rune just consumed ('-', 'e', a
	// base prefix, '.') must be followed by at least one more digit.
	needDigit := false

	for i := 0; i < len(b); i++ {
		negativeIndex++

		switch b[i] {
		case '-':
			// '-' is only legal as the very first rune, or directly
			// after an exponent marker (negativeIndex is reset below
			// when 'e'/'E' is seen)
			if helper.IsNegative() || negativeIndex != 1 {
				return false
			}
			helper.Determine(b[i])
			needDigit = true
			continue
		case 'e', 'E':
			if err := helper.Determine(b[i]); err != nil {
				return false
			}
			// permit a '-' immediately after the exponent marker
			negativeIndex = 0
			needDigit = true
			continue
		case 'b':
			if helper.numberFormat == hex {
				// 'b' is a valid hex digit; fall through to the
				// per-format digit check below
				break
			}
			fallthrough
		case 'o', 'x':
			needDigit = true
			if i == 0 {
				return false
			}

			fallthrough
		case '.':
			if err := helper.Determine(b[i]); err != nil {
				return false
			}
			needDigit = true
			continue
		}

		// a delimiter ends the number; valid only if the previous rune
		// did not demand another digit
		if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
			return !needDigit
		}

		if !helper.CorrectByte(b[i]) {
			return false
		}
		needDigit = false
	}

	return !needDigit
}
+
// isValid reports whether the first rune of b may be part of an
// unquoted value, along with the number of runes inspected (always 1
// for non-empty input).
func isValid(b []rune) (bool, int, error) {
	if len(b) == 0 {
		// TODO: should probably return an error
		return false, 0, nil
	}

	return isValidRune(b[0]), 1, nil
}
+
// isValidRune reports whether r may appear inside an unquoted value.
// Structural runes (operators, section braces, space, newline)
// terminate a value.
func isValidRune(r rune) bool {
	switch r {
	case ':', '=', '[', ']', ' ', '\n':
		return false
	}
	return true
}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
// ValueType enums
const (
	NoneType         = ValueType(iota) // no value set
	DecimalType                        // floating point literal
	IntegerType                        // integer literal (any base)
	StringType                         // unquoted string
	QuotedStringType                   // double-quoted string
	BoolType                           // true/false literal
)
+
// Value is a union container
type Value struct {
	Type ValueType
	raw  []rune // the literal runes as they appeared in the source

	// exactly one of the following carries the parsed value,
	// selected by Type
	integer int64
	decimal float64
	boolean bool
	str     string
}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ if err != nil {
+ panic(err)
+ }
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ return v, err
+}
+
// Append will append values and change the type to a string
// type.
func (v *Value) Append(tok Token) {
	r := tok.Raw()
	if v.Type != QuotedStringType {
		v.Type = StringType
		// NOTE(review): this drops the first and last rune of the
		// token's raw content — presumably stripping the quotes of a
		// quoted token being folded into an unquoted string; confirm
		// against the lexer's call sites.
		r = tok.raw[1 : len(tok.raw)-1]
	}
	if tok.Type() != TokenLit {
		// non-literal tokens (whitespace, operators) are appended
		// verbatim
		v.raw = append(v.raw, tok.Raw()...)
	} else {
		v.raw = append(v.raw, r...)
	}
}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
// newLitToken lexes one literal token from the head of b, returning
// the token, the number of runes consumed, and any lexing error.
// The literal's concrete type is detected in priority order:
// quoted string, number, boolean, then plain string.
func newLitToken(b []rune) (Token, int, error) {
	n := 0
	var err error

	token := Token{}
	if b[0] == '"' {
		n, err = getStringValue(b)
		if err != nil {
			return token, n, err
		}

		token = newToken(TokenLit, b[:n], QuotedStringType)
	} else if isNumberValue(b) {
		var base int
		base, n, err = getNumericalValue(b)
		if err != nil {
			return token, 0, err
		}

		value := b[:n]
		vType := IntegerType
		// a '.' or an exponent marker makes the literal a float
		if contains(value, '.') || hasExponent(value) {
			vType = DecimalType
		}
		token = newToken(TokenLit, value, vType)
		token.base = base
	} else if isBoolValue(b) {
		n, err = getBoolValue(b)

		token = newToken(TokenLit, b[:n], BoolType)
	} else {
		n, err = getValue(b)
		token = newToken(TokenLit, b[:n], StringType)
	}

	return token, n, err
}
+
// IntValue returns the integer value; the zero value when Type is
// not IntegerType.
func (v Value) IntValue() int64 {
	return v.integer
}

// FloatValue returns the float value; the zero value when Type is
// not DecimalType.
func (v Value) FloatValue() float64 {
	return v.decimal
}

// BoolValue returns the bool value; false when Type is not BoolType.
func (v Value) BoolValue() bool {
	return v.boolean
}
+
// isTrimmable reports whether r is stripped from the ends of
// unquoted string values (newline or space).
func isTrimmable(r rune) bool {
	return r == '\n' || r == ' '
}
+
// StringValue returns the string value
func (v Value) StringValue() string {
	switch v.Type {
	case StringType:
		// unquoted strings are trimmed of surrounding spaces/newlines
		return strings.TrimFunc(string(v.raw), isTrimmable)
	case QuotedStringType:
		// preserve all characters in the quotes
		return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
	default:
		return strings.TrimFunc(string(v.raw), isTrimmable)
	}
}
+
// contains reports whether rune c occurs anywhere in runes.
func contains(runes []rune, c rune) bool {
	for _, r := range runes {
		if r == c {
			return true
		}
	}
	return false
}
+
// runeCompare reports whether v1 and v2 contain exactly the same
// runes in the same order.
func runeCompare(v1 []rune, v2 []rune) bool {
	if len(v1) != len(v2) {
		return false
	}
	for i, r := range v1 {
		if v2[i] != r {
			return false
		}
	}
	return true
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
new file mode 100644
index 0000000..e52ac39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
// isNewline reports whether b starts with a line ending: either a
// bare "\n" or the two-rune sequence "\r\n". A lone "\r" is not a
// newline.
func isNewline(b []rune) bool {
	switch {
	case len(b) == 0:
		return false
	case b[0] == '\n':
		return true
	case len(b) >= 2:
		return b[0] == '\r' && b[1] == '\n'
	default:
		return false
	}
}
+
// newNewlineToken consumes one newline ("\n" or "\r\n") from the
// head of b and returns the TokenNL token plus the number of runes
// consumed.
func newNewlineToken(b []rune) (Token, int, error) {
	i := 1
	if b[0] == '\r' && isNewline(b[1:]) {
		// "\r\n" is consumed as a single two-rune newline
		i++
	}

	if !isNewline([]rune(b[:i])) {
		return emptyToken, 0, NewParseError("invalid new line token")
	}

	return newToken(TokenNL, b[:i], NoneType), i, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
new file mode 100644
index 0000000..a45c0bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
// number formats recognized by the lexer
const (
	none = numberFormat(iota)
	binary
	octal
	decimal
	hex
	exponent
)

type numberFormat int

// numberHelper is used to dictate what format a number is in
// and what to do for negative values. Since -1e-4 is a valid
// number, we cannot just simply check for duplicate negatives.
type numberHelper struct {
	numberFormat numberFormat

	negative         bool // a '-' was seen on the mantissa
	negativeExponent bool // a '-' was seen after the exponent marker
}

// Exists reports whether a specific number format has already been
// determined.
func (b numberHelper) Exists() bool {
	return b.numberFormat != none
}

// IsNegative reports whether a '-' has been seen in either the
// mantissa or the exponent.
func (b numberHelper) IsNegative() bool {
	return b.negative || b.negativeExponent
}
+
// Determine records the number format implied by the format rune c
// ('b', 'o', 'x', 'e'/'E', '.') or tracks its sign ('-'). It is an
// error to see a second format rune, or a rune that is not a format
// character at all.
func (b *numberHelper) Determine(c rune) error {
	if b.Exists() {
		return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
	}

	switch c {
	case 'b':
		b.numberFormat = binary
	case 'o':
		b.numberFormat = octal
	case 'x':
		b.numberFormat = hex
	case 'e', 'E':
		b.numberFormat = exponent
	case '-':
		// '-' does not set a format; record which part of the number
		// it negates instead
		if b.numberFormat != exponent {
			b.negative = true
		} else {
			b.negativeExponent = true
		}
	case '.':
		b.numberFormat = decimal
	default:
		return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
	}

	return nil
}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
// String renders the helper's state as a numbered list of the format
// flags that are set, for debugging.
func (b numberHelper) String() string {
	buf := bytes.Buffer{}
	i := 0

	switch b.numberFormat {
	case binary:
		i++
		buf.WriteString(strconv.Itoa(i) + ": binary format\n")
	case octal:
		i++
		buf.WriteString(strconv.Itoa(i) + ": octal format\n")
	case hex:
		i++
		buf.WriteString(strconv.Itoa(i) + ": hex format\n")
	case exponent:
		i++
		buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
	default:
		i++
		buf.WriteString(strconv.Itoa(i) + ": integer format\n")
	}

	if b.negative {
		i++
		buf.WriteString(strconv.Itoa(i) + ": negative format\n")
	}

	if b.negativeExponent {
		i++
		buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
	}

	return buf.String()
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
new file mode 100644
index 0000000..8a84c7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
// isOp reports whether b starts with an assignment operator rune
// ('=' or ':').
func isOp(b []rune) bool {
	if len(b) == 0 {
		return false
	}
	return b[0] == '=' || b[0] == ':'
}
+
// newOpToken consumes a single operator rune ('=' or ':') from the
// head of b and returns the corresponding TokenOp token; one rune is
// always consumed on success.
func newOpToken(b []rune) (Token, int, error) {
	tok := Token{}

	switch b[0] {
	case '=':
		tok = newToken(TokenOp, equalOp, NoneType)
	case ':':
		tok = newToken(TokenOp, equalColonOp, NoneType)
	default:
		return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
	}
	return tok, 1, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
new file mode 100644
index 0000000..4572870
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
const (
	// ErrCodeParseError is returned when a parsing error
	// has occurred.
	ErrCodeParseError = "INIParseError"
)

// ParseError is an error which is returned during any part of
// the parsing process.
type ParseError struct {
	msg string
}

// NewParseError will return a new ParseError where message
// is the description of the error.
func NewParseError(message string) *ParseError {
	return &ParseError{
		msg: message,
	}
}

// Code will return the ErrCodeParseError
func (err *ParseError) Code() string {
	return ErrCodeParseError
}

// Message returns the error's message
func (err *ParseError) Message() string {
	return err.msg
}

// OrigError return nothing since there will never be any
// original error.
func (err *ParseError) OrigError() error {
	return nil
}

// Error satisfies the error interface, formatting as "code: message".
func (err *ParseError) Error() string {
	return fmt.Sprintf("%s: %s", err.Code(), err.Message())
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
new file mode 100644
index 0000000..7f01cf7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
// ParseStack is a stack that contains a container, the stack portion,
// and the list which is the list of ASTs that have been successfully
// parsed.
type ParseStack struct {
	top       int   // index one past the current top of the stack
	container []AST // fixed-capacity stack storage
	list      []AST // completed statements, in completion order
	index     int   // number of completed statements in list
}

// newParseStack returns a ParseStack with pre-allocated storage for
// sizeContainer stack slots and sizeList completed statements.
func newParseStack(sizeContainer, sizeList int) ParseStack {
	return ParseStack{
		container: make([]AST, sizeContainer),
		list:      make([]AST, sizeList),
	}
}

// Pop will return and truncate the last container element.
func (s *ParseStack) Pop() AST {
	s.top--
	return s.container[s.top]
}

// Push will add the new AST to the container.
// NOTE(review): no bounds growth — assumes callers never exceed the
// pre-allocated capacity; confirm sizing at the call site.
func (s *ParseStack) Push(ast AST) {
	s.container[s.top] = ast
	s.top++
}

// MarkComplete will append the AST to the list of completed statements
func (s *ParseStack) MarkComplete(ast AST) {
	s.list[s.index] = ast
	s.index++
}

// List will return the completed statements
func (s ParseStack) List() []AST {
	return s.list[:s.index]
}

// Len will return the length of the container
func (s *ParseStack) Len() int {
	return s.top
}

// String renders the completed-statement list for debugging.
func (s ParseStack) String() string {
	buf := bytes.Buffer{}
	for i, node := range s.list {
		buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
	}

	return buf.String()
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
new file mode 100644
index 0000000..f82095b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
// isSep reports whether b starts with a section separator rune
// ('[' or ']').
func isSep(b []rune) bool {
	if len(b) == 0 {
		return false
	}
	return b[0] == '[' || b[0] == ']'
}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
// newSepToken consumes a single separator rune ('[' or ']') from the
// head of b and returns the corresponding TokenSep token; one rune is
// always consumed on success.
func newSepToken(b []rune) (Token, int, error) {
	tok := Token{}

	switch b[0] {
	case '[':
		tok = newToken(TokenSep, openBrace, NoneType)
	case ']':
		tok = newToken(TokenSep, closeBrace, NoneType)
	default:
		return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
	}
	return tok, 1, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
new file mode 100644
index 0000000..b72ea2f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
@@ -0,0 +1,42 @@
+package ini
+
// skipper is used to skip certain blocks of an ini file.
// Currently skipper is used to skip nested blocks of ini
// files. See example below
//
//	[ foo ]
//	nested = // this section will be skipped
//		a=b
//		c=d
//	bar=baz // this will be included
type skipper struct {
	shouldSkip bool  // currently inside a skipped region
	TokenSet   bool  // NOTE(review): appears unused in this file — confirm before relying on it
	prevTok    Token // previous token, used to detect the end of the skipped region
}

func newSkipper() skipper {
	return skipper{
		prevTok: emptyToken,
	}
}

// ShouldSkip reports whether tok falls inside a skipped region.
// Skipping ends at the first non-whitespace token that directly
// follows a newline.
func (s *skipper) ShouldSkip(tok Token) bool {
	if s.shouldSkip && s.prevTok.Type() == TokenNL && tok.Type() != TokenWS {
		s.Continue()
		return false
	}
	s.prevTok = tok

	return s.shouldSkip
}

// Skip starts a skipped region.
func (s *skipper) Skip() {
	s.shouldSkip = true
	s.prevTok = emptyToken
}

// Continue ends a skipped region.
func (s *skipper) Continue() {
	s.shouldSkip = false
	s.prevTok = emptyToken
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
new file mode 100644
index 0000000..ea86fea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
@@ -0,0 +1,36 @@
+package ini
+
// Statement is an empty AST mostly used for transitioning states.
func newStatement() AST {
	return newAST(ASTKindStatement, AST{})
}

// SectionStatement represents a section AST
func newSectionStatement(tok Token) AST {
	return newASTWithRootToken(ASTKindSectionStatement, tok)
}

// ExprStatement represents a completed expression AST
func newExprStatement(ast AST) AST {
	return newAST(ASTKindExprStatement, ast)
}

// CommentStatement represents a comment in the ini definition.
//
// grammar:
// comment -> #comment' | ;comment' | /comment_slash
// comment_slash -> /comment'
// comment' -> value
func newCommentStatement(tok Token) AST {
	return newAST(ASTKindCommentStatement, newExpression(tok))
}

// CompletedSectionStatement represents a completed section
func newCompletedSectionStatement(ast AST) AST {
	return newAST(ASTKindCompletedSectionStatement, ast)
}

// SkipStatement is used to skip whole statements
func newSkipStatement(ast AST) AST {
	return newAST(ASTKindSkipStatement, ast)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
new file mode 100644
index 0000000..305999d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
// getStringValue will return a quoted string and the amount
// of bytes read
//
// an error will be returned if the string is not properly formatted
func getStringValue(b []rune) (int, error) {
	if b[0] != '"' {
		return 0, NewParseError("strings must start with '\"'")
	}

	endQuote := false
	i := 1

	for ; i < len(b) && !endQuote; i++ {
		if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
			endQuote = true
			break
		} else if escaped {
			// escape sequences are left in place here; they are
			// resolved later by removeEscapedCharacters
			/*c, err := getEscapedByte(b[i])
			if err != nil {
				return 0, err
			}

			b[i-1] = c
			b = append(b[:i], b[i+1:]...)
			i--*/

			continue
		}
	}

	if !endQuote {
		return 0, NewParseError("missing '\"' in string value")
	}

	// i is the index of the closing quote; consume it as well
	return i + 1, nil
}
+
// getBoolValue will return a boolean and the amount
// of bytes read
//
// an error will be returned if the boolean is not of a correct
// value
func getBoolValue(b []rune) (int, error) {
	// the shortest boolean literal ("true") is four runes
	if len(b) < 4 {
		return 0, NewParseError("invalid boolean value")
	}

	n := 0
	for _, lv := range literalValues {
		if len(lv) > len(b) {
			continue
		}

		if isLitValue(lv, b) {
			n = len(lv)
		}
	}

	if n == 0 {
		return 0, NewParseError("invalid boolean value")
	}

	return n, nil
}
+
// getNumericalValue will return a numerical string, the amount
// of bytes read, and the base of the number
//
// an error will be returned if the number is not of a correct
// value
func getNumericalValue(b []rune) (int, int, error) {
	if !isDigit(b[0]) {
		return 0, 0, NewParseError("invalid digit value")
	}

	i := 0
	helper := numberHelper{}

loop:
	for negativeIndex := 0; i < len(b); i++ {
		negativeIndex++

		if !isDigit(b[i]) {
			switch b[i] {
			case '-':
				// only legal directly after an exponent marker
				// (negativeIndex is reset when 'e'/'E' is seen)
				if helper.IsNegative() || negativeIndex != 1 {
					return 0, 0, NewParseError("parse error '-'")
				}

				// consume the whole negative run at once
				n := getNegativeNumber(b[i:])
				i += (n - 1)
				helper.Determine(b[i])
				continue
			case '.':
				if err := helper.Determine(b[i]); err != nil {
					return 0, 0, err
				}
			case 'e', 'E':
				if err := helper.Determine(b[i]); err != nil {
					return 0, 0, err
				}

				// permit a '-' right after the exponent marker
				negativeIndex = 0
			case 'b':
				if helper.numberFormat == hex {
					// 'b' is a valid hex digit
					break
				}
				fallthrough
			case 'o', 'x':
				// base prefixes must form "0b"/"0o"/"0x" at the start
				if i == 0 && b[i] != '0' {
					return 0, 0, NewParseError("incorrect base format, expected leading '0'")
				}

				if i != 1 {
					return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
				}

				if err := helper.Determine(b[i]); err != nil {
					return 0, 0, err
				}
			default:
				// whitespace or a newline delimits the number
				if isWhitespace(b[i]) {
					break loop
				}

				if isNewline(b[i:]) {
					break loop
				}

				if !(helper.numberFormat == hex && isHexByte(b[i])) {
					if i+2 < len(b) && !isNewline(b[i:i+2]) {
						return 0, 0, NewParseError("invalid numerical character")
					} else if !isNewline([]rune{b[i]}) {
						return 0, 0, NewParseError("invalid numerical character")
					}

					break loop
				}
			}
		}
	}

	return helper.Base(), i, nil
}
+
// isDigit reports whether b is an ASCII decimal digit ('0'-'9').
func isDigit(b rune) bool {
	return b >= '0' && b <= '9'
}

// hasExponent reports whether v contains a scientific-notation
// exponent marker ('e' or 'E').
func hasExponent(v []rune) bool {
	return contains(v, 'e') || contains(v, 'E')
}
+
// isBinaryByte reports whether b is a binary digit ('0' or '1').
func isBinaryByte(b rune) bool {
	return b == '0' || b == '1'
}
+
// isOctalByte reports whether b is an octal digit ('0'-'7').
func isOctalByte(b rune) bool {
	return b >= '0' && b <= '7'
}
+
// isHexByte reports whether b is a hexadecimal digit
// ('0'-'9', 'A'-'F', 'a'-'f').
func isHexByte(b rune) bool {
	return (b >= '0' && b <= '9') ||
		(b >= 'A' && b <= 'F') ||
		(b >= 'a' && b <= 'f')
}
+
// getValue consumes runes from the head of b until a newline, an
// operator, or an invalid value rune is reached, returning the
// number of runes consumed.
func getValue(b []rune) (int, error) {
	i := 0

	for i < len(b) {
		if isNewline(b[i:]) {
			break
		}

		if isOp(b[i:]) {
			break
		}

		valid, n, err := isValid(b[i:])
		if err != nil {
			return 0, err
		}

		if !valid {
			break
		}

		i += n
	}

	return i, nil
}
+
// getNegativeNumber returns the length of the leading negative-number
// run in b: the '-' plus every consecutive digit that follows it.
// It returns 0 when b does not start with '-'.
func getNegativeNumber(b []rune) int {
	if b[0] != '-' {
		return 0
	}

	i := 1
	for ; i < len(b); i++ {
		if !isDigit(b[i]) {
			return i
		}
	}

	return i
}
+
// isEscaped reports whether b is an escapable character (quote,
// 'n', 't', or backslash) immediately preceded by a backslash at
// the end of value.
func isEscaped(value []rune, b rune) bool {
	if len(value) == 0 {
		return false
	}

	switch b {
	case '\'', '"', 'n', 't', '\\':
		return value[len(value)-1] == '\\'
	}
	return false
}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+ case 't': // table
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
// removeEscapedCharacters resolves escape sequences in b, replacing
// each backslash-escape pair with the single rune it denotes. The
// input slice is mutated in place. If an invalid escape is
// encountered the slice is returned as-is.
func removeEscapedCharacters(b []rune) []rune {
	for i := 0; i < len(b); i++ {
		if isEscaped(b[:i], b[i]) {
			c, err := getEscapedByte(b[i])
			if err != nil {
				return b
			}

			// overwrite the backslash with the resolved rune and
			// close the gap left by the escape character
			b[i-1] = c
			b = append(b[:i], b[i+1:]...)
			i--
		}
	}

	return b
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
new file mode 100644
index 0000000..c710170
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
@@ -0,0 +1,155 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
// Visitor is an interface used by walkers that will
// traverse an array of ASTs.
type Visitor interface {
	// VisitExpr is called for expression nodes.
	VisitExpr(AST) error
	// VisitStatement is called for statement nodes.
	VisitStatement(AST) error
}

// DefaultVisitor is used to visit statements and expressions
// and ensure that they are both of the correct format.
// In addition, upon visiting this will build sections and populate
// the Sections field which can be used to retrieve profile
// configuration.
type DefaultVisitor struct {
	scope    string   // name of the section currently being populated
	Sections Sections // parsed sections keyed by name
}

// NewDefaultVisitor return a DefaultVisitor
func NewDefaultVisitor() *DefaultVisitor {
	return &DefaultVisitor{
		Sections: Sections{
			container: map[string]Section{},
		},
	}
}
+
// VisitExpr visits an expression statement, storing the key/value
// pair of an equal-expression into the section named by the current
// scope. Only ASTKindExprStatement wrapping an ASTKindEqualExpr is
// supported.
func (v *DefaultVisitor) VisitExpr(expr AST) error {
	t := v.Sections.container[v.scope]
	if t.values == nil {
		t.values = values{}
	}

	switch expr.Kind {
	case ASTKindExprStatement:
		opExpr := expr.GetRoot()
		switch opExpr.Kind {
		case ASTKindEqualExpr:
			children := opExpr.GetChildren()
			if len(children) <= 1 {
				return NewParseError("unexpected token type")
			}

			// children[1] is the right-hand side of the '='
			rhs := children[1]

			if rhs.Root.Type() != TokenLit {
				return NewParseError("unexpected token type")
			}

			key := EqualExprKey(opExpr)
			// note: this local v shadows the receiver for the rest of
			// this case block
			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
			if err != nil {
				return err
			}

			t.values[key] = v
		default:
			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
		}
	default:
		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
	}

	v.Sections.container[v.scope] = t
	return nil
}
+
+// VisitStatement visits statements...
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
// Sections is a map of Section structures that represent
// a configuration.
type Sections struct {
	container map[string]Section
}

// GetSection will return section p. If section p does not exist,
// false will be returned in the second parameter.
func (t Sections) GetSection(p string) (Section, bool) {
	v, ok := t.container[p]
	return v, ok
}

// values represents a map of union values.
type values map[string]Value

// List will return a list of all sections that were successfully
// parsed, sorted by name for deterministic output.
func (t Sections) List() []string {
	keys := make([]string, len(t.container))
	i := 0
	for k := range t.container {
		keys[i] = k
		i++
	}

	sort.Strings(keys)
	return keys
}
+
// Section contains a name and values. This represent
// a sectioned entry in a configuration file.
type Section struct {
	Name   string
	values values
}

// ValueType will returned what type the union is set to. If
// k was not found, the NoneType will be returned.
func (t Section) ValueType(k string) (ValueType, bool) {
	v, ok := t.values[k]
	return v.Type, ok
}

// Int returns an integer value at k; zero when k is absent.
func (t Section) Int(k string) int64 {
	return t.values[k].IntValue()
}

// Float64 returns a float value at k; zero when k is absent.
func (t Section) Float64(k string) float64 {
	return t.values[k].FloatValue()
}

// String returns the string value at k, or "" when k is absent.
func (t Section) String(k string) string {
	_, ok := t.values[k]
	if !ok {
		return ""
	}
	return t.values[k].StringValue()
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
new file mode 100644
index 0000000..99915f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
// Walk will traverse the AST using the v, the Visitor. Expression
// nodes are dispatched to VisitExpr and statement nodes to
// VisitStatement; nodes of any other kind are silently skipped.
func Walk(tree []AST, v Visitor) error {
	for _, node := range tree {
		switch node.Kind {
		case ASTKindExpr,
			ASTKindExprStatement:

			if err := v.VisitExpr(node); err != nil {
				return err
			}
		case ASTKindStatement,
			ASTKindCompletedSectionStatement,
			ASTKindNestedSectionStatement,
			ASTKindCompletedNestedSectionStatement:

			if err := v.VisitStatement(node); err != nil {
				return err
			}
		}
	}

	return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
new file mode 100644
index 0000000..7ffb4ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
// isWhitespace reports whether c is a whitespace character other
// than a line ending: space, tab, and other unicode space runes
// qualify, but '\n' and '\r' never do.
func isWhitespace(c rune) bool {
	switch c {
	case '\n', '\r':
		return false
	}
	return unicode.IsSpace(c)
}
+
// newWSToken consumes the run of leading whitespace from b and
// returns it as a TokenWS token along with the number of runes
// consumed.
func newWSToken(b []rune) (Token, int, error) {
	i := 0
	for ; i < len(b); i++ {
		if !isWhitespace(b[i]) {
			break
		}
	}

	return newToken(TokenWS, b[:i], NoneType), i, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
new file mode 100644
index 0000000..2b42cbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
@@ -0,0 +1,9 @@
+package sdk
+
// Invalidator provides access to a type's invalidate method to make
// it invalidate its cache.
//
// e.g. aws.SafeCredentialsProvider's Invalidate method.
type Invalidator interface {
	Invalidate()
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
new file mode 100644
index 0000000..e5640ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
@@ -0,0 +1,44 @@
+package sdk
+
+import (
+ "context"
+ "time"
+)
+
// init wires the package-level time hooks to their real
// implementations; tests replace them to mock time.
func init() {
	NowTime = time.Now
	Sleep = time.Sleep
	SleepWithContext = DefaultSleepWithContext
}

// NowTime is a value for getting the current time. This value can be overridden
// for testing mocking out current time.
var NowTime func() time.Time

// Sleep is a value for sleeping for a duration. This value can be overridden
// for testing and mocking out sleep duration.
var Sleep func(time.Duration)

// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Whichever happens first. If the context is canceled the Context's
// error will be returned.
//
// This value can be overridden for testing and mocking out sleep duration.
var SleepWithContext func(context.Context, time.Duration) error
+
+// DefaultSleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Which ever happens first. If the context is canceled the Context's
+// error will be returned.
+func DefaultSleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go
new file mode 100644
index 0000000..dc3d129
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/encode.go
@@ -0,0 +1,95 @@
+package protocol
+
+import (
+ "io"
+)
+
// A FieldMarshaler interface is used to marshal struct fields when encoding.
type FieldMarshaler interface {
	MarshalFields(FieldEncoder) error
}

// FieldMarshalerFunc is a helper utility that wraps a function and allows
// that function to be called as a FieldMarshaler.
type FieldMarshalerFunc func(FieldEncoder) error

// MarshalFields will call the underlying function passing in the field encoder
// with the protocol field encoder.
func (fn FieldMarshalerFunc) MarshalFields(e FieldEncoder) error {
	return fn(e)
}

// ValueMarshaler provides a generic type for all encoding field values to be
// passed into a encoder's methods with.
type ValueMarshaler interface {
	MarshalValue() (string, error)
	MarshalValueBuf([]byte) ([]byte, error)
}

// A StreamMarshaler interface is used to marshal a stream when encoding.
type StreamMarshaler interface {
	MarshalStream() (io.ReadSeeker, error)
}

// MarshalListValues is a marshaler for list encoders.
type MarshalListValues interface {
	MarshalValues(enc ListEncoder) error
}

// MapMarshaler is a marshaler for map encoders.
type MapMarshaler interface {
	MarshalValues(enc MapEncoder) error
}

// A ListEncoder provides the interface for encoders that will encode List elements.
type ListEncoder interface {
	Start()
	End()

	// Map and List open nested collections within the current list.
	Map() MapEncoder
	List() ListEncoder

	ListAddValue(v ValueMarshaler)
	ListAddFields(m FieldMarshaler)
}

// A MapEncoder provides the interface for encoders that will encode map elements.
type MapEncoder interface {
	Start()
	End()

	// Map and List open nested collections under key k.
	Map(k string) MapEncoder
	List(k string) ListEncoder

	MapSetValue(k string, v ValueMarshaler)
	MapSetFields(k string, m FieldMarshaler)
}

// A FieldEncoder provides the interface for encoding struct field members.
type FieldEncoder interface {
	SetValue(t Target, k string, m ValueMarshaler, meta Metadata)
	SetStream(t Target, k string, m StreamMarshaler, meta Metadata)
	SetFields(t Target, k string, m FieldMarshaler, meta Metadata)

	Map(t Target, k string, meta Metadata) MapEncoder
	List(t Target, k string, meta Metadata) ListEncoder
}
+
// A FieldBuffer provides buffering of fields so the number of
// allocations are reduced by providing a persistent buffer that is
// used between fields.
type FieldBuffer struct {
	buf []byte
}

// GetValue will retrieve the ValueMarshaler's value by appending the
// value to the buffer. Will return the buffer that was populated.
//
// This buffer is only valid until the next time GetValue is called.
func (b *FieldBuffer) GetValue(m ValueMarshaler) ([]byte, error) {
	v, err := m.MarshalValueBuf(b.buf)
	// retain the larger slice so future calls reuse its capacity
	if len(v) > len(b.buf) {
		b.buf = v
	}
	return v, err
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go
new file mode 100644
index 0000000..b131e22
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/fields.go
@@ -0,0 +1,197 @@
+package protocol
+
+import (
+ "bytes"
+ "encoding/base64"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// QuotedValue represents a type that should be quoted when encoding
+// a string value.
+type QuotedValue struct {
+	ValueMarshaler
+}
+
+// BoolValue provides encoding of bool for AWS protocols.
+type BoolValue bool
+
+// MarshalValue formats the value into a string for encoding.
+func (v BoolValue) MarshalValue() (string, error) {
+	return strconv.FormatBool(bool(v)), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v BoolValue) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+	return strconv.AppendBool(b, bool(v)), nil
+}
+
+// BytesValue provides base64 encoding of a raw byte string for AWS protocols.
+type BytesValue string
+
+// MarshalValue formats the value into a base64 string for encoding.
+func (v BytesValue) MarshalValue() (string, error) {
+	return base64.StdEncoding.EncodeToString([]byte(v)), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// The base64 output is written from the start of b when b is large
+// enough; otherwise a new slice is allocated (b is not appended to).
+func (v BytesValue) MarshalValueBuf(b []byte) ([]byte, error) {
+	m := []byte(v)
+
+	n := base64.StdEncoding.EncodedLen(len(m))
+	if len(b) < n {
+		b = make([]byte, n)
+	}
+	base64.StdEncoding.Encode(b, m)
+	return b[:n], nil
+}
+
+// StringValue provides encoding of string for AWS protocols.
+type StringValue string
+
+// MarshalValue formats the value into a string for encoding.
+func (v StringValue) MarshalValue() (string, error) {
+	return string(v), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v StringValue) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+	return append(b, v...), nil
+}
+
+// Int64Value provides encoding of int64 for AWS protocols.
+type Int64Value int64
+
+// MarshalValue formats the value into a base-10 string for encoding.
+func (v Int64Value) MarshalValue() (string, error) {
+	return strconv.FormatInt(int64(v), 10), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v Int64Value) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+	return strconv.AppendInt(b, int64(v), 10), nil
+}
+
+// Float64Value provides encoding of float64 for AWS protocols.
+type Float64Value float64
+
+// MarshalValue formats the value with the shortest 'f' representation.
+func (v Float64Value) MarshalValue() (string, error) {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v Float64Value) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+	return strconv.AppendFloat(b, float64(v), 'f', -1, 64), nil
+}
+
+// JSONValue provides encoding of aws.JSONValues for AWS protocols.
+type JSONValue struct {
+	// V is the JSON value to encode.
+	V aws.JSONValue
+	// EscapeMode controls base64/quote escaping of the marshaled JSON.
+	EscapeMode EscapeMode
+}
+
+// MarshalValue formats the value into a string for encoding.
+func (v JSONValue) MarshalValue() (string, error) {
+	return EncodeJSONValue(v.V, v.EscapeMode)
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v JSONValue) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+
+	m, err := EncodeJSONValue(v.V, v.EscapeMode)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(b, []byte(m)...), nil
+}
+
+// Time formats for protocol time fields.
+//
+// NOTE(review): RFC822TimeFromat is misspelled ("Fromat") but exported, so the
+// name is kept for backward compatibility with existing callers.
+const (
+	ISO8601TimeFormat = "2006-01-02T15:04:05Z" // ISO 8601 formatted time, second precision.
+	RFC822TimeFromat = "Mon, 2 Jan 2006 15:04:05 GMT" // RFC822 formatted time.
+	UnixTimeFormat = "unix time format" // Special case for Unix time
+)
+
+// TimeValue provides encoding of time.Time for AWS protocols.
+type TimeValue struct {
+	// V is the time to encode; it is converted to UTC before formatting.
+	V time.Time
+	// Format is a time layout string, or UnixTimeFormat for epoch seconds.
+	Format string
+}
+
+// MarshalValue formats the value into a string given a format for encoding.
+func (v TimeValue) MarshalValue() (string, error) {
+	t := time.Time(v.V)
+
+	if v.Format == UnixTimeFormat {
+		return strconv.FormatInt(t.UTC().Unix(), 10), nil
+	}
+	return t.UTC().Format(v.Format), nil
+}
+
+// MarshalValueBuf formats the value into a byte slice for encoding.
+// If there is enough room in the passed in slice v will be appended to it.
+//
+// Will reset the length of the passed in slice to 0.
+func (v TimeValue) MarshalValueBuf(b []byte) ([]byte, error) {
+	b = b[0:0]
+
+	m, err := v.MarshalValue()
+	if err != nil {
+		return nil, err
+	}
+
+	return append(b, m...), nil
+}
+
+// A ReadSeekerStream wraps an io.ReadSeeker to be used as a StreamMarshaler.
+type ReadSeekerStream struct {
+	V io.ReadSeeker
+}
+
+// MarshalStream returns the wrapped io.ReadSeeker for encoding.
+func (v ReadSeekerStream) MarshalStream() (io.ReadSeeker, error) {
+	return v.V, nil
+}
+
+// A BytesStream aliases a byte slice to be used as a StreamMarshaler.
+type BytesStream []byte
+
+// MarshalStream marshals a byte slice into an io.ReadSeeker for encoding.
+func (v BytesStream) MarshalStream() (io.ReadSeeker, error) {
+	return bytes.NewReader([]byte(v)), nil
+}
+
+// A StringStream aliases a string to be used as a StreamMarshaler.
+type StringStream string
+
+// MarshalStream marshals a string into an io.ReadSeeker for encoding.
+func (v StringStream) MarshalStream() (io.ReadSeeker, error) {
+	return strings.NewReader(string(v)), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go
new file mode 100644
index 0000000..38bae69
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/header_encoder.go
@@ -0,0 +1,116 @@
+package protocol
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// HeaderMapEncoder encodes map values into HTTP headers, optionally
+// prefixing each key with Prefix. The first error encountered is latched
+// in Err and all subsequent operations become no-ops.
+type HeaderMapEncoder struct {
+	Prefix string
+	Header http.Header
+	Err error
+}
+
+// MapSetValue adds a single value to the header.
+func (e *HeaderMapEncoder) MapSetValue(k string, v ValueMarshaler) {
+	if e.Err != nil {
+		return
+	}
+
+	str, err := v.MarshalValue()
+	if err != nil {
+		e.Err = err
+		return
+	}
+
+	if len(e.Prefix) > 0 {
+		k = e.Prefix + k
+	}
+
+	// Set (not Add): a repeated key replaces the previous value.
+	e.Header.Set(k, str)
+}
+
+// List executes the passed in callback with a list encoder based on
+// the context of this HeaderMapEncoder.
+func (e *HeaderMapEncoder) List(k string) ListEncoder {
+	if e.Err != nil {
+		return nil
+	}
+
+	if len(e.Prefix) > 0 {
+		k = e.Prefix + k
+	}
+
+	return &HeaderListEncoder{Key: k, Header: e.Header}
+}
+
+// Map sets the header element with nested maps appending the
+// passed in k to the prefix if one was set.
+func (e *HeaderMapEncoder) Map(k string) MapEncoder {
+	if e.Err != nil {
+		return nil
+	}
+
+	if len(e.Prefix) > 0 {
+		k = e.Prefix + k
+	}
+
+	return &HeaderMapEncoder{Prefix: k, Header: e.Header}
+}
+
+// Start does nothing for header encodings.
+func (e *HeaderMapEncoder) Start() {}
+
+// End does nothing for header encodings.
+func (e *HeaderMapEncoder) End() {}
+
+// MapSetFields Is not implemented, query map of FieldMarshaler is undefined.
+func (e *HeaderMapEncoder) MapSetFields(k string, m FieldMarshaler) {
+	e.Err = fmt.Errorf("header map encoder MapSetFields not supported, %s", k)
+}
+
+// HeaderListEncoder will encode list values nested into a header key.
+// The first error encountered is latched in Err; later calls are no-ops.
+type HeaderListEncoder struct {
+	Key string
+	Header http.Header
+	Err error
+}
+
+// ListAddValue encodes an individual list value into the header.
+func (e *HeaderListEncoder) ListAddValue(v ValueMarshaler) {
+	if e.Err != nil {
+		return
+	}
+
+	str, err := v.MarshalValue()
+	if err != nil {
+		e.Err = err
+		return
+	}
+
+	// Add (not Set): each element becomes another value under Key.
+	e.Header.Add(e.Key, str)
+}
+
+// List Is not implemented, header list of list is undefined.
+func (e *HeaderListEncoder) List() ListEncoder {
+	e.Err = fmt.Errorf("header list encoder ListAddList not supported, %s", e.Key)
+	return nil
+}
+
+// Map Is not implemented, header list of map is undefined.
+func (e *HeaderListEncoder) Map() MapEncoder {
+	e.Err = fmt.Errorf("header list encoder ListAddMap not supported, %s", e.Key)
+	return nil
+}
+
+// Start does nothing for header list encodings.
+func (e *HeaderListEncoder) Start() {}
+
+// End does nothing for header list encodings.
+func (e *HeaderListEncoder) End() {}
+
+// ListAddFields Is not implemented, query list of FieldMarshaler is undefined.
+func (e *HeaderListEncoder) ListAddFields(m FieldMarshaler) {
+	e.Err = fmt.Errorf("header list encoder ListAddFields not supported, %s", e.Key)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go
new file mode 100644
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with a Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+//
+// NOTE(review): the error from RandReader.Read is ignored; a short read
+// would yield a token with trailing zero bytes — confirm acceptable.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with a Idempotency Token.
+// Given that the value can be set. Will panic if value is not setable.
+//
+// Accepts a string or *string reflect.Value; a nil settable pointer is
+// allocated before assignment.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	// Read through RandReader (not crypto/rand directly) so tests can
+	// substitute a deterministic source, consistent with GetIdempotencyToken.
+	_, err := RandReader.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided.
+//
+// The input must be at least 16 bytes; note that u is mutated in place to
+// stamp the version and variant bits.
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go
new file mode 100644
index 0000000..029be32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,293 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// Cached reflect.Types used to special-case time.Time and []byte values.
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+
+	err := buildAny(reflect.ValueOf(v), &buf, "", false)
+	return buf.Bytes(), err
+}
+
+// buildAny dispatches on the value's protocol "type" tag (or its reflect
+// kind when untagged) to the structure/list/map/scalar builders.
+// parentCollection indicates the value is an element of a list or map,
+// which affects empty-string handling in buildScalar.
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag, parentCollection bool) error {
+	origVal := value
+	value = reflect.Indirect(value)
+	if !value.IsValid() {
+		return nil
+	}
+
+	vtype := value.Type()
+
+	t := tag.Get("type")
+	if t == "" {
+		switch vtype.Kind() {
+		case reflect.Struct:
+			// also it can't be a time object
+			if value.Type() != timeType {
+				t = "structure"
+			}
+		case reflect.Slice:
+			// also it can't be a byte slice
+			if _, ok := value.Interface().([]byte); !ok {
+				t = "list"
+			}
+		case reflect.Map:
+			// cannot be a JSONValue map
+			if _, ok := value.Interface().(aws.JSONValue); !ok {
+				t = "map"
+			}
+		}
+	}
+
+	switch t {
+	case "structure":
+		// Struct-level metadata lives on the blank "_" field's tag.
+		if field, ok := vtype.FieldByName("_"); ok {
+			tag = field.Tag
+		}
+		return buildStruct(value, buf, tag)
+	case "list":
+		return buildList(value, buf, tag)
+	case "map":
+		return buildMap(value, buf, tag)
+	default:
+		return buildScalar(origVal, buf, tag, parentCollection)
+	}
+}
+
+// buildStruct encodes a struct value as a JSON object, honoring the AWS
+// protocol tags: payload unwrapping, locationName renames, idempotency
+// token auto-fill, and skipping of location/ignore/unexported fields.
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	buf.WriteByte('{')
+
+	t := value.Type()
+	first := true
+	for i := 0; i < t.NumField(); i++ {
+		member := value.Field(i)
+
+		// This allocates the most memory.
+		// Additionally, we cannot skip nil fields due to
+		// idempotency auto filling.
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+		if field.Tag.Get("json") == "-" {
+			continue
+		}
+		if field.Tag.Get("location") != "" {
+			continue // ignore non-body elements
+		}
+		if field.Tag.Get("ignore") != "" {
+			continue
+		}
+
+		// Auto-populate empty idempotency-token fields with a random token.
+		if protocol.CanSetIdempotencyToken(member, field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(&token)
+		}
+
+		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+			continue // ignore unset fields
+		} else if member.Kind() == reflect.String && member.Len() == 0 {
+			continue
+		}
+
+		if first {
+			first = false
+		} else {
+			buf.WriteByte(',')
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		writeString(name, buf)
+		buf.WriteString(`:`)
+
+		err := buildAny(member, buf, field.Tag, false)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	buf.WriteString("}")
+
+	return nil
+}
+
+// buildList encodes a slice value as a JSON array into buf.
+//
+// Errors from encoding an element are now returned to the caller instead
+// of being silently discarded.
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	buf.WriteString("[")
+
+	for i := 0; i < value.Len(); i++ {
+		elem := value.Index(i)
+		// Propagate element encoding failures; previously they were dropped.
+		if err := buildAny(elem, buf, "", true); err != nil {
+			return err
+		}
+
+		if i < value.Len()-1 {
+			buf.WriteString(",")
+		}
+	}
+
+	buf.WriteString("]")
+
+	return nil
+}
+
+// sortedValues sorts reflect map keys by their string form so that map
+// output is deterministic.
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int { return len(sv) }
+func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
+// buildMap encodes a map value as a JSON object into buf, with keys
+// emitted in sorted order for deterministic output.
+//
+// Errors from encoding a map value are now returned to the caller instead
+// of being silently discarded.
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	buf.WriteString("{")
+
+	sv := sortedValues(value.MapKeys())
+	sort.Sort(sv)
+
+	for i, k := range sv {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+
+		writeString(k.String(), buf)
+		buf.WriteString(`:`)
+
+		// Propagate value encoding failures; previously they were dropped.
+		if err := buildAny(value.MapIndex(k), buf, "", true); err != nil {
+			return err
+		}
+	}
+
+	buf.WriteString("}")
+
+	return nil
+}
+
+// buildScalar encodes a leaf value (string, bool, int64, float64, time,
+// []byte, or aws.JSONValue) as its JSON representation into buf.
+func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag, parentCollection bool) error {
+	// prevents allocation on the heap.
+	scratch := [64]byte{}
+	switch value := reflect.Indirect(v); value.Kind() {
+	case reflect.String:
+		str := value.String()
+		// Empty enum strings are omitted unless inside a list/map element.
+		isEnum := len(tag.Get("enum")) != 0
+		if parentCollection || (len(str) > 0 && isEnum) || !isEnum {
+			writeString(str, buf)
+		}
+	case reflect.Bool:
+		if value.Bool() {
+			buf.WriteString("true")
+		} else {
+			buf.WriteString("false")
+		}
+	case reflect.Int64:
+		buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
+	case reflect.Float64:
+		f := value.Float()
+		// JSON cannot represent Inf/NaN; reject them explicitly.
+		if math.IsInf(f, 0) || math.IsNaN(f) {
+			return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+		}
+		buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
+	default:
+		switch converted := value.Interface().(type) {
+		case time.Time:
+			// Times are encoded as Unix epoch seconds (UTC).
+			buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10))
+		case []byte:
+			if !value.IsNil() {
+				buf.WriteByte('"')
+				if len(converted) < 1024 {
+					// for small buffers, using Encode directly is much faster.
+					dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+					base64.StdEncoding.Encode(dst, converted)
+					buf.Write(dst)
+				} else {
+					// for large buffers, avoid unnecessary extra temporary
+					// buffer space.
+					enc := base64.NewEncoder(base64.StdEncoding, buf)
+					enc.Write(converted)
+					enc.Close()
+				}
+				buf.WriteByte('"')
+			}
+		case aws.JSONValue:
+			str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+			if err != nil {
+				return fmt.Errorf("unable to encode JSONValue, %v", err)
+			}
+			buf.WriteString(str)
+		default:
+			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		}
+	}
+	return nil
+}
+
+// hex digits used for \u00XX escapes of control characters.
+var hex = "0123456789abcdef"
+
+// writeString writes s to buf as a double-quoted JSON string, escaping
+// quotes, backslashes, common control characters, and any byte < 0x20.
+func writeString(s string, buf *bytes.Buffer) {
+	buf.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		if s[i] == '"' {
+			buf.WriteString(`\"`)
+		} else if s[i] == '\\' {
+			buf.WriteString(`\\`)
+		} else if s[i] == '\b' {
+			buf.WriteString(`\b`)
+		} else if s[i] == '\f' {
+			buf.WriteString(`\f`)
+		} else if s[i] == '\r' {
+			buf.WriteString(`\r`)
+		} else if s[i] == '\t' {
+			buf.WriteString(`\t`)
+		} else if s[i] == '\n' {
+			buf.WriteString(`\n`)
+		} else if s[i] < 32 {
+			buf.WriteString("\\u00")
+			buf.WriteByte(hex[s[i]>>4])
+			buf.WriteByte(hex[s[i]&0xF])
+		} else {
+			buf.WriteByte(s[i])
+		}
+	}
+	buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 0000000..dce3a67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,242 @@
+package jsonutil
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "reflect"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+// An empty stream is treated as success with v left unchanged.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+	var out interface{}
+
+	b, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return err
+	}
+
+	if len(b) == 0 {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &out); err != nil {
+		return err
+	}
+
+	return unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+// unmarshalAny dispatches decoded JSON data onto value based on the
+// protocol "type" tag, or the value's reflect kind when untagged.
+func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	vtype := value.Type()
+	if vtype.Kind() == reflect.Ptr {
+		vtype = vtype.Elem() // check kind of actual element type
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch vtype.Kind() {
+		case reflect.Struct:
+			// also it can't be a time object
+			_, tok := value.Interface().(*time.Time)
+			if _, ok := value.Interface().(time.Time); !(ok || tok) {
+				t = "structure"
+			}
+		case reflect.Slice:
+			// also it can't be a byte slice
+			if _, ok := value.Interface().([]byte); !ok {
+				t = "list"
+			}
+		case reflect.Map:
+			// cannot be a JSONValue map
+			if _, ok := value.Interface().(aws.JSONValue); !ok {
+				t = "map"
+			}
+		}
+	}
+
+	switch t {
+	case "structure":
+		// Struct-level metadata lives on the blank "_" field's tag.
+		if field, ok := vtype.FieldByName("_"); ok {
+			tag = field.Tag
+		}
+		return unmarshalStruct(value, data, tag)
+	case "list":
+		return unmarshalList(value, data, tag)
+	case "map":
+		return unmarshalMap(value, data, tag)
+	default:
+		return unmarshalScalar(value, data, tag)
+	}
+}
+
+// unmarshalStruct decodes a JSON object into a struct value, allocating
+// the struct when value is a nil pointer, honoring payload unwrapping and
+// locationName renames.
+func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	if data == nil {
+		return nil
+	}
+
+	mapData, ok := data.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("JSON value is not a structure (%#v)", data)
+	}
+
+	t := value.Type()
+	if value.Kind() == reflect.Ptr {
+		if value.IsNil() { // create the structure if it's nil
+			s := reflect.New(value.Type().Elem())
+			value.Set(s)
+			value = s
+		}
+
+		value = value.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return unmarshalAny(value.FieldByName(payload), data, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		member := value.FieldByIndex(field.Index)
+		err := unmarshalAny(member, mapData[name], field.Tag)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// unmarshalList decodes a JSON array into a slice value, allocating the
+// slice when it is nil.
+func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	if data == nil {
+		return nil
+	}
+	listData, ok := data.([]interface{})
+	if !ok {
+		return fmt.Errorf("JSON value is not a list (%#v)", data)
+	}
+
+	if value.IsNil() {
+		l := len(listData)
+		value.Set(reflect.MakeSlice(value.Type(), l, l))
+	}
+
+	for i, c := range listData {
+		err := unmarshalAny(value.Index(i), c, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// unmarshalMap decodes a JSON object into a map value, allocating the map
+// when it is nil.
+//
+// Errors from decoding an entry's value are now returned to the caller
+// instead of being silently discarded.
+func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	if data == nil {
+		return nil
+	}
+	mapData, ok := data.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("JSON value is not a map (%#v)", data)
+	}
+
+	if value.IsNil() {
+		value.Set(reflect.MakeMap(value.Type()))
+	}
+
+	for k, v := range mapData {
+		kvalue := reflect.ValueOf(k)
+		vvalue := reflect.New(value.Type().Elem()).Elem()
+		// Propagate value decoding failures; previously they were dropped.
+		if err := unmarshalAny(vvalue, v, ""); err != nil {
+			return err
+		}
+		value.SetMapIndex(kvalue, vvalue)
+	}
+
+	return nil
+}
+
+// unmarshalScalar decodes a JSON leaf (string, number, bool, or null)
+// into the corresponding Go scalar, []byte, time.Time, or aws.JSONValue.
+func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	errf := func() error {
+		return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+	}
+
+	switch d := data.(type) {
+	case nil:
+		return nil // nothing to do here
+	case string:
+		if value.Kind() == reflect.String {
+			value.SetString(d)
+			return nil
+		}
+
+		switch value.Interface().(type) {
+		case *string:
+			value.Set(reflect.ValueOf(&d))
+		case string:
+			value.Set(reflect.ValueOf(d))
+		case []byte:
+			// Blobs arrive base64 encoded.
+			b, err := base64.StdEncoding.DecodeString(d)
+			if err != nil {
+				return err
+			}
+			value.Set(reflect.ValueOf(b))
+		case aws.JSONValue:
+			// No need to use escaping as the value is a non-quoted string.
+			v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+			if err != nil {
+				return err
+			}
+			value.Set(reflect.ValueOf(v))
+		default:
+			return errf()
+		}
+	case float64:
+		// encoding/json decodes all JSON numbers as float64.
+		switch value.Interface().(type) {
+		case *int64:
+			di := int64(d)
+			value.Set(reflect.ValueOf(&di))
+		case int64:
+			di := int64(d)
+			value.Set(reflect.ValueOf(di))
+		case *float64:
+			value.Set(reflect.ValueOf(&d))
+		case float64:
+			value.Set(reflect.ValueOf(d))
+		case *time.Time:
+			// Timestamps arrive as Unix epoch seconds.
+			t := time.Unix(int64(d), 0).UTC()
+			value.Set(reflect.ValueOf(&t))
+		case time.Time:
+			t := time.Unix(int64(d), 0).UTC()
+			value.Set(reflect.ValueOf(t))
+		default:
+			return errf()
+		}
+	case bool:
+		switch value.Interface().(type) {
+		case *bool:
+			value.Set(reflect.ValueOf(&d))
+		default:
+			return errf()
+		}
+	default:
+		return fmt.Errorf("unsupported JSON value (%v)", data)
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go
new file mode 100644
index 0000000..d2d4f82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc/jsonrpc.go
@@ -0,0 +1,109 @@
+// Package jsonrpc provides JSON RPC utilities for serialization of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "strings"
+
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/rest"
+)
+
+// emptyJSON is the body sent when an operation has no input parameters.
+var emptyJSON = []byte("{}")
+
+// BuildHandler is a named request handler for building jsonrpc protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a JSON payload for a JSON RPC request. It also sets the
+// X-Amz-Target and Content-Type headers when the service metadata
+// provides a target prefix or JSON version.
+func Build(req *request.Request) {
+	var buf []byte
+	var err error
+	if req.ParamsFilled() {
+		buf, err = jsonutil.BuildJSON(req.Params)
+		if err != nil {
+			req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err)
+			return
+		}
+	} else {
+		buf = emptyJSON
+	}
+
+	// Only send a body when there is a target prefix or a non-empty payload.
+	if req.Metadata.TargetPrefix != "" || string(buf) != "{}" {
+		req.SetBufferBody(buf)
+	}
+
+	if req.Metadata.TargetPrefix != "" {
+		target := req.Metadata.TargetPrefix + "." + req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+	if req.Metadata.JSONVersion != "" {
+		jsonVersion := req.Metadata.JSONVersion
+		req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err)
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+// An empty body falls back to the HTTP status line as the error message.
+func UnmarshalError(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err)
+		return
+	}
+	if len(bodyBytes) == 0 {
+		req.Error = awserr.NewRequestFailure(
+			awserr.New("SerializationError", req.HTTPResponse.Status, nil),
+			req.HTTPResponse.StatusCode,
+			"",
+		)
+		return
+	}
+	var jsonErr jsonErrorResponse
+	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
+		req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err)
+		return
+	}
+
+	// Error codes may be namespaced like "com.example#Code"; keep only the
+	// portion after the '#'.
+	codes := strings.SplitN(jsonErr.Code, "#", 2)
+	req.Error = awserr.NewRequestFailure(
+		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+		req.HTTPResponse.StatusCode,
+		req.RequestID,
+	)
+}
+
+// jsonErrorResponse is the wire shape of a JSON RPC error body.
+type jsonErrorResponse struct {
+	Code string `json:"__type"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go
new file mode 100644
index 0000000..8a27043
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// EscapeMode is the mode that should be use for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota // raw JSON text
+	Base64Escape // JSON text base64 encoded
+	QuotedEscape // JSON text wrapped in a Go quoted string
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally decoding base64 the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	// Undo the escaping applied by EncodeJSONValue before unmarshaling.
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go
new file mode 100644
index 0000000..3a94fd0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/metadata.go
@@ -0,0 +1,24 @@
+package protocol
+
+// An Attribute is a FieldValue that resides within the immediate context of
+// another field. Such as XML attribute for tags.
+type Attribute struct {
+	Name string
+	Value ValueMarshaler
+	Meta Metadata
+}
+
+// Metadata is a collection of configuration flags for encoders to render the
+// output.
+type Metadata struct {
+	// Attributes are additional name/value pairs attached to the field.
+	Attributes []Attribute
+
+	// Flatten indicates list/map members are encoded without a wrapper.
+	Flatten bool
+
+	// Override names for list members and map key/value elements.
+	ListLocationName string
+	MapLocationNameKey string
+	MapLocationNameValue string
+
+	// XML namespace prefix and URI applied to the field.
+	XMLNamespacePrefix string
+	XMLNamespaceURI string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go
new file mode 100644
index 0000000..ae64a17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/path_replace.go
@@ -0,0 +1,136 @@
+package protocol
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ uriTokenStart = '{'
+ uriTokenStop = '}'
+ uriTokenSkip = '+'
+)
+
+func bufCap(b []byte, n int) []byte {
+ if cap(b) < n {
+ return make([]byte, 0, n)
+ }
+
+ return b[0:0]
+}
+
+// PathReplace replaces path elements using field buffers
+type PathReplace struct {
+ // May mutate path slice
+ path []byte
+ rawPath []byte
+ fieldBuf []byte
+}
+
+// NewPathReplace creates a built PathReplace value that can be used to replace
+// path elements.
+func NewPathReplace(path string) PathReplace {
+ return PathReplace{
+ path: []byte(path),
+ rawPath: []byte(path),
+ }
+}
+
+// Encode returns an unescaped path, and escaped path.
+func (r *PathReplace) Encode() (path string, rawPath string) {
+ return string(r.path), string(r.rawPath)
+}
+
+// ReplaceElement replaces a single element in the path string.
+func (r *PathReplace) ReplaceElement(key, val string) (err error) {
+ r.path, r.fieldBuf, err = replacePathElement(r.path, r.fieldBuf, key, val, false)
+ r.rawPath, r.fieldBuf, err = replacePathElement(r.rawPath, r.fieldBuf, key, val, true)
+ return err
+}
+
+func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) {
+ fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] }
+ fieldBuf = append(fieldBuf, uriTokenStart)
+ fieldBuf = append(fieldBuf, key...)
+
+ start := bytes.Index(path, fieldBuf)
+ end := start + len(fieldBuf)
+ if start < 0 || len(path[end:]) == 0 {
+ // TODO what to do about error?
+ return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path)
+ }
+
+ encodeSep := true
+ if path[end] == uriTokenSkip {
+ // '+' token means do not escape slashes
+ encodeSep = false
+ end++
+ }
+
+ if escape {
+ val = escapePath(val, encodeSep)
+ }
+
+ if path[end] != uriTokenStop {
+ return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path)
+ }
+ end++
+
+ fieldBuf = bufCap(fieldBuf, len(val))
+ fieldBuf = append(fieldBuf, val...)
+
+ keyLen := end - start
+ valLen := len(fieldBuf)
+
+ if keyLen == valLen {
+ copy(path[start:], fieldBuf)
+ return path, fieldBuf, nil
+ }
+
+ newLen := len(path) + (valLen - keyLen)
+ if len(path) < newLen {
+ path = path[:cap(path)]
+ }
+ if cap(path) < newLen {
+ newURI := make([]byte, newLen)
+ copy(newURI, path)
+ path = newURI
+ }
+
+ // shift
+ copy(path[start+valLen:], path[end:])
+ path = path[:newLen]
+ copy(path[start:], fieldBuf)
+
+ return path, fieldBuf, nil
+}
+
+// copied from rest.EscapePath
+// escapes part of a URL path in Amazon style
+func escapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+var noEscape [256]bool
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go
new file mode 100644
index 0000000..3905c4b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests, and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.Metadata.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if r.ExpireTime == 0 {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 0000000..01de5ca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,261 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "", false)
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag, parentCollection bool) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ var err error
+ switch t {
+ case "structure":
+ err = q.parseStruct(v, value, prefix)
+ case "list":
+ err = q.parseList(v, value, prefix, tag)
+ case "map":
+ err = q.parseMap(v, value, prefix, tag)
+ default:
+ err = q.parseScalar(v, value, prefix, tag, parentCollection)
+ }
+
+ if protocol.IsNotSetError(err) {
+ return nil
+ }
+ return err
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag, false); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag, true)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, "", true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, "", true); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, "", true); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag, parentCollection bool) error {
+ if r.Kind() == reflect.String {
+ val, err := protocol.GetValue(r)
+ isEnum := len(tag.Get("enum")) != 0
+
+ if err == nil || parentCollection {
+ v.Set(name, val)
+ } else if isEnum && len(val) > 0 {
+ v.Set(name, val)
+ } else if !isEnum {
+ v.Set(name, val)
+ }
+ return err
+ }
+
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ v.Set(name, value.UTC().Format(ISO8601UTC))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go
new file mode 100644
index 0000000..36c0e12
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal.go
@@ -0,0 +1,33 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
+ return
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..2af00c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
+package query
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"ErrorResponse"`
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+ XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+ return
+ }
+
+ // First check for specific error
+ resp := xmlErrorResponse{}
+ decodeErr := xml.Unmarshal(bodyBytes, &resp)
+ if decodeErr == nil {
+ reqID := resp.RequestID
+ if reqID == "" {
+ reqID = r.RequestID
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+ return
+ }
+
+ // Check for unhandled error
+ servUnavailResp := xmlServiceUnavailableResponse{}
+ unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+ if unavailErr == nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Failed to retrieve any error message from the response body
+ r.Error = awserr.New("SerializationError",
+ "failed to decode query XML error response", decodeErr)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go
new file mode 100644
index 0000000..baebc99
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/query_encoder.go
@@ -0,0 +1,105 @@
+package protocol
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// QueryMapEncoder builds a query string.
+type QueryMapEncoder struct {
+ Prefix string
+ Query url.Values
+ Err error
+}
+
+// List will return a new QueryListEncoder.
+func (e *QueryMapEncoder) List(k string) ListEncoder {
+ if len(e.Prefix) > 0 {
+ k = e.Prefix + k
+ }
+
+ return &QueryListEncoder{k, e.Query, nil}
+}
+
+// Map will return a new QueryMapEncoder.
+func (e *QueryMapEncoder) Map(k string) MapEncoder {
+ if len(e.Prefix) > 0 {
+ k = e.Prefix + k
+ }
+
+ return &QueryMapEncoder{k, e.Query, nil}
+}
+
+// Start does nothing.
+func (e *QueryMapEncoder) Start() {}
+
+// End does nothing.
+func (e *QueryMapEncoder) End() {}
+
+// MapSetValue adds a single value to the query.
+func (e *QueryMapEncoder) MapSetValue(k string, v ValueMarshaler) {
+ if e.Err != nil {
+ return
+ }
+
+ str, err := v.MarshalValue()
+ if err != nil {
+ e.Err = err
+ return
+ }
+
+ if len(e.Prefix) > 0 {
+ k = e.Prefix + k
+ }
+
+ e.Query.Add(k, str)
+}
+
+// MapSetFields Is not implemented, query map of map is undefined.
+func (e *QueryMapEncoder) MapSetFields(k string, m FieldMarshaler) {
+ e.Err = fmt.Errorf("query map encoder MapSetFields not supported, %s", e.Prefix)
+}
+
+// QueryListEncoder will encode list values nested into a query key.
+type QueryListEncoder struct {
+ Key string
+ Query url.Values
+ Err error
+}
+
+// List will return a new QueryListEncoder.
+func (e *QueryListEncoder) List() ListEncoder {
+ return &QueryListEncoder{e.Key, e.Query, nil}
+}
+
+// Start does nothing for the query protocol.
+func (e *QueryListEncoder) Start() {}
+
+// End does nothing for the query protocol.
+func (e *QueryListEncoder) End() {}
+
+// Map will return a new QueryMapEncoder.
+func (e *QueryListEncoder) Map() MapEncoder {
+ k := e.Key
+ return &QueryMapEncoder{k, e.Query, nil}
+}
+
+// ListAddValue encodes an individual list value into the querystring.
+func (e *QueryListEncoder) ListAddValue(v ValueMarshaler) {
+ if e.Err != nil {
+ return
+ }
+
+ str, err := v.MarshalValue()
+ if err != nil {
+ e.Err = err
+ return
+ }
+
+ e.Query.Add(e.Key, str)
+}
+
+// ListAddFields Is not implemented, query list of FieldMarshaler is undefined.
+func (e *QueryListEncoder) ListAddFields(m FieldMarshaler) {
+ e.Err = fmt.Errorf("query list encoder ListAddFields not supported, %s", e.Key)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go
new file mode 100644
index 0000000..14c9f47
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/build.go
@@ -0,0 +1,339 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// RFC822 returns an RFC822 formatted timestamp for AWS protocols
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Setup the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch m.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+
+ if protocol.IsNotSetError(err) {
+ err = nil
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !r.Config.DisableRestProtocolURICleaning {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New("SerializationError",
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ var err error
+ str := ""
+
+ if v.Kind() == reflect.String {
+ str, err = protocol.GetValue(v)
+ } else {
+ str, err = convertType(v, tag)
+ }
+
+ if protocol.IsNotSetError(err) {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if protocol.IsNotSetError(err) {
+ continue
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+
+ }
+
+ header.Add(prefix+key.String(), str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value := ""
+ var err error
+ if v.Kind() == reflect.String {
+ value, err = protocol.GetValue(v)
+ } else {
+ value, err = convertType(v, tag)
+ }
+
+ if protocol.IsNotSetError(err) {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ if kind := v.Kind(); kind == reflect.String {
+ value, err := protocol.GetValue(v)
+ if err == nil {
+ query.Add(name, value)
+ }
+ return err
+ } else if kind == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.String {
+ for i := 0; i < v.Len(); i++ {
+ query.Add(name, v.Index(i).String())
+ }
+ return nil
+ }
+
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case []string:
+ for _, item := range value {
+ query.Add(name, item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string]string:
+ for key, item := range value {
+ query.Add(key, item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ case map[string][]string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if protocol.IsNotSetError(err) {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", &protocol.ErrValueNotSet{}
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ str = value.UTC().Format(RFC822)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", &protocol.ErrValueNotSet{}
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go
new file mode 100644
index 0000000..ff1ce94
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/encode.go
@@ -0,0 +1,144 @@
+package rest
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+ req *http.Request
+
+ path protocol.PathReplace
+
+ query url.Values
+ header http.Header
+
+ payload io.ReadSeeker
+
+ err error
+}
+
+// NewEncoder creates a new encoder from the passed in request. All query and
+// header values will be added on top of the request's existing values. Overwriting
+// duplicate values.
+func NewEncoder(req *http.Request) *Encoder {
+ e := &Encoder{
+ req: req,
+
+ path: protocol.NewPathReplace(req.URL.Path),
+ query: req.URL.Query(),
+ header: req.Header,
+ }
+
+ return e
+}
+
+// Encode will return the request and body if one was set. If the body
+// payload was not set the io.ReadSeeker will be nil.
+//
+// Returns any error if one occurred while encoding the API's parameters.
+func (e *Encoder) Encode() (*http.Request, io.ReadSeeker, error) {
+ if e.err != nil {
+ return nil, nil, e.err
+ }
+
+ e.req.URL.Path, e.req.URL.RawPath = e.path.Encode()
+ e.req.URL.RawQuery = e.query.Encode()
+ e.req.Header = e.header
+
+ return e.req, e.payload, nil
+}
+
+// SetValue will set a value to the header, path, query.
+//
+// If the request's method is GET all BodyTarget values will be written to
+// the query string.
+func (e *Encoder) SetValue(t protocol.Target, k string, v protocol.ValueMarshaler, meta protocol.Metadata) {
+ if e.err != nil {
+ return
+ }
+
+ var str string
+ str, e.err = v.MarshalValue()
+ if e.err != nil {
+ return
+ }
+
+ switch t {
+ case protocol.HeaderTarget:
+ e.header.Set(k, str)
+ case protocol.PathTarget:
+ e.path.ReplaceElement(k, str)
+ case protocol.QueryTarget:
+ e.query.Set(k, str)
+ case protocol.BodyTarget:
+ if e.req.Method != "GET" {
+ e.err = fmt.Errorf("body target not supported for rest non-GET methods %s, %s", t, k)
+ return
+ }
+ e.query.Set(k, str)
+ default:
+ e.err = fmt.Errorf("unknown SetValue rest encode target, %s, %s", t, k)
+ }
+}
+
+// SetStream will set the stream to the payload of the request.
+func (e *Encoder) SetStream(t protocol.Target, k string, v protocol.StreamMarshaler, meta protocol.Metadata) {
+ if e.err != nil {
+ return
+ }
+
+ switch t {
+ case protocol.PayloadTarget:
+ e.payload, e.err = v.MarshalStream()
+ default:
+ e.err = fmt.Errorf("unknown SetStream rest encode target, %s, %s", t, k)
+ }
+}
+
+// List will set the nested list values to the header or query.
+func (e *Encoder) List(t protocol.Target, k string, meta protocol.Metadata) protocol.ListEncoder {
+ if e.err != nil {
+ return nil
+ }
+
+ switch t {
+ case protocol.QueryTarget:
+ return &protocol.QueryListEncoder{Key: k, Query: e.query}
+ case protocol.HeaderTarget:
+ return &protocol.HeaderListEncoder{Key: k, Header: e.header}
+ default:
+ e.err = fmt.Errorf("unknown SetList rest encode target, %s, %s", t, k)
+ return nil
+ }
+}
+
+// Map will set the nested map values to the header or query.
+func (e *Encoder) Map(t protocol.Target, k string, meta protocol.Metadata) protocol.MapEncoder {
+ if e.err != nil {
+ return nil
+ }
+
+ switch t {
+ case protocol.QueryTarget:
+ return &protocol.QueryMapEncoder{Query: e.query}
+ case protocol.HeadersTarget:
+ return &protocol.HeaderMapEncoder{Prefix: k, Header: e.header}
+ default:
+ e.err = fmt.Errorf("unknown SetMap rest encode target, %s, %s", t, k)
+ return nil
+ }
+}
+
+// SetFields is not supported for REST encoder.
+func (e *Encoder) SetFields(t protocol.Target, k string, m protocol.FieldMarshaler, meta protocol.Metadata) {
+ e.err = fmt.Errorf("rest encoder SetFields not supported, %s, %s", t, k)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go
new file mode 100644
index 0000000..4366de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go
new file mode 100644
index 0000000..a9f1415
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/rest/unmarshal.go
@@ -0,0 +1,251 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New("SerializationError",
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ case int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]string: // we only support string map value types
+ out := map[string]string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if v.Kind() == reflect.String {
+ if len(header) > 0 {
+ v.SetString(header)
+ }
+ return nil
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case string:
+ v.Set(reflect.ValueOf(header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(f))
+ case *time.Time:
+ t, err := time.Parse(RFC822, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case time.Time:
+ t, err := time.Parse(RFC822, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go
new file mode 100644
index 0000000..36e65d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/target.go
@@ -0,0 +1,38 @@
+package protocol
+
+import "fmt"
+
+// Target is the encode and decode targets of protocol marshaling.
+type Target int
+
+// The protocol marshaling targets.
+const (
+ PathTarget Target = iota
+ QueryTarget
+ HeaderTarget
+ HeadersTarget
+ StatusCodeTarget
+ BodyTarget
+ PayloadTarget
+)
+
+func (e Target) String() string {
+ switch e {
+ case PathTarget:
+ return "Path"
+ case QueryTarget:
+ return "Query"
+ case HeaderTarget:
+ return "Header"
+ case HeadersTarget:
+ return "Headers"
+ case StatusCodeTarget:
+ return "StatusCode"
+ case BodyTarget:
+ return "Body"
+ case PayloadTarget:
+ return "Payload"
+ default:
+ panic(fmt.Sprintf("// unknown encoding target, %d", e))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go
new file mode 100644
index 0000000..de04834
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ request "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and closing it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go
new file mode 100644
index 0000000..892b286
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/value.go
@@ -0,0 +1,30 @@
+package protocol
+
+import (
+ "reflect"
+)
+
+// ErrValueNotSet is an error that is returned when the value
+// has not been set.
+type ErrValueNotSet struct{}
+
+func (err *ErrValueNotSet) Error() string {
+ return "value not set"
+}
+
+// IsNotSetError will return true if the error is of ErrValueNotSet
+func IsNotSetError(err error) bool {
+ _, ok := err.(*ErrValueNotSet)
+ return ok
+}
+
+// GetValue will return the value that is associated with the reflect.Value.
+// If that value is not set, this will return an ErrValueNotSet
+func GetValue(r reflect.Value) (string, error) {
+ val := r.String()
+ if len(val) == 0 {
+ return "", &ErrValueNotSet{}
+ }
+
+ return val, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..ab4f1e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,302 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, false)
+ }
+ }
+ return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// A xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue generic XMLNode builder for any type. Will build value for their specific type
+// struct, list, map, scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
+// type is not provided reflect will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ fieldAdded := false
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+
+ fieldAdded = true
+ }
+
+ if fieldAdded { // only append this child if we have one ore more valid members
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as a attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ str = converted.UTC().Format(ISO8601UTC)
+ default:
+ if value.Kind() != reflect.String {
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+ str = value.Convert(reflect.TypeOf("")).String()
+ }
+
+ if len(str) == 0 {
+ return nil
+ }
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..2b95c35
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,278 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. V
+// needs to match the shape of the XML expected to be decoded.
+// If the shape doesn't match unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
+// will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ _, tok := r.Interface().(*time.Time)
+ if _, ok := r.Interface().(time.Time); !(ok || tok) {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := r.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if Children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+ }
+
+ for i, c := range Children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from a XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScaller deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.Kind() == reflect.String {
+ r.SetString(node.Text)
+ return nil
+ }
+
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() {
+ r.Set(reflect.New(r.Type().Elem()))
+ }
+ r = r.Elem()
+ }
+
+ switch r.Interface().(type) {
+ case string:
+ r.Set(reflect.ValueOf(node.Text))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(v))
+ case int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(v))
+ case float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(v))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ t, err := time.Parse(ISO8601UTC, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..3e970b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,147 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// A XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+ tempOut := *out
+ // Save into a temp variable, simply because out gets squashed during
+ // loop iterations
+ node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api.go
new file mode 100644
index 0000000..f2caf7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api.go
@@ -0,0 +1,10337 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc"
+)
+
+const opBatchGetItem = "BatchGetItem"
+
+// BatchGetItemRequest is a API request type for the BatchGetItem API operation.
+type BatchGetItemRequest struct {
+ *aws.Request
+ Input *BatchGetItemInput
+ Copy func(*BatchGetItemInput) BatchGetItemRequest
+}
+
+// Send marshals and sends the BatchGetItem API request.
+func (r BatchGetItemRequest) Send() (*BatchGetItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*BatchGetItemOutput), nil
+}
+
+// BatchGetItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The BatchGetItem operation returns the attributes of one or more items from
+// one or more tables. You identify requested items by primary key.
+//
+// A single operation can retrieve up to 16 MB of data, which can contain as
+// many as 100 items. BatchGetItem will return a partial result if the response
+// size limit is exceeded, the table's provisioned throughput is exceeded, or
+// an internal processing failure occurs. If a partial result is returned, the
+// operation returns a value for UnprocessedKeys. You can use this value to
+// retry the operation starting with the next item to get.
+//
+// If you request more than 100 items BatchGetItem will return a ValidationException
+// with the message "Too many items requested for the BatchGetItem call".
+//
+// For example, if you ask to retrieve 100 items, but each individual item is
+// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB
+// limit). It also returns an appropriate UnprocessedKeys value so you can get
+// the next page of results. If desired, your application can include its own
+// logic to assemble the pages of results into one data set.
+//
+// If none of the items can be processed due to insufficient provisioned throughput
+// on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException.
+// If at least one of the items is successfully processed, then BatchGetItem
+// completes successfully, while returning the keys of the unread items in UnprocessedKeys.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// By default, BatchGetItem performs eventually consistent reads on every table
+// in the request. If you want strongly consistent reads instead, you can set
+// ConsistentRead to true for any or all tables.
+//
+// In order to minimize response latency, BatchGetItem retrieves items in parallel.
+//
+// When designing your application, keep in mind that DynamoDB does not return
+// items in any particular order. To help parse the response by item, include
+// the primary key values for the items in your request in the ProjectionExpression
+// parameter.
+//
+// If a requested item does not exist, it is not returned in the result. Requests
+// for nonexistent items consume the minimum read capacity units according to
+// the type of read. For more information, see Capacity Units Calculations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the BatchGetItemRequest method.
+// req := client.BatchGetItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem
+func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) BatchGetItemRequest {
+ op := &aws.Operation{
+ Name: opBatchGetItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"RequestItems"},
+ OutputTokens: []string{"UnprocessedKeys"},
+ LimitToken: "",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &BatchGetItemInput{}
+ }
+
+ output := &BatchGetItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return BatchGetItemRequest{Request: req, Input: input, Copy: c.BatchGetItemRequest}
+}
+
+// Paginate pages iterates over the pages of a BatchGetItemRequest operation,
+// calling the Next method for each page. Using the paginators Next
+// method will depict whether or not there are more pages.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a BatchGetItem operation.
+// req := client.BatchGetItemRequest(input)
+// p := req.Paginate()
+// for p.Next() {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func (p *BatchGetItemRequest) Paginate(opts ...aws.Option) BatchGetItemPager {
+ return BatchGetItemPager{
+ Pager: aws.Pager{
+ NewRequest: func() (*aws.Request, error) {
+ var inCpy *BatchGetItemInput
+ if p.Input != nil {
+ tmp := *p.Input
+ inCpy = &tmp
+ }
+
+ req := p.Copy(inCpy)
+ req.ApplyOptions(opts...)
+
+ return req.Request, nil
+ },
+ },
+ }
+}
+
+// BatchGetItemPager is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type BatchGetItemPager struct {
+ aws.Pager
+}
+
+func (p *BatchGetItemPager) CurrentPage() *BatchGetItemOutput {
+ return p.Pager.CurrentPage().(*BatchGetItemOutput)
+}
+
+const opBatchWriteItem = "BatchWriteItem"
+
+// BatchWriteItemRequest is a API request type for the BatchWriteItem API operation.
+type BatchWriteItemRequest struct {
+ *aws.Request
+ Input *BatchWriteItemInput
+ Copy func(*BatchWriteItemInput) BatchWriteItemRequest
+}
+
+// Send marshals and sends the BatchWriteItem API request.
+func (r BatchWriteItemRequest) Send() (*BatchWriteItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*BatchWriteItemOutput), nil
+}
+
+// BatchWriteItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The BatchWriteItem operation puts or deletes multiple items in one or more
+// tables. A single call to BatchWriteItem can write up to 16 MB of data, which
+// can comprise as many as 25 put or delete requests. Individual items to be
+// written can be as large as 400 KB.
+//
+// BatchWriteItem cannot update items. To update items, use the UpdateItem action.
+//
+// The individual PutItem and DeleteItem operations specified in BatchWriteItem
+// are atomic; however BatchWriteItem as a whole is not. If any requested operations
+// fail because the table's provisioned throughput is exceeded or an internal
+// processing failure occurs, the failed operations are returned in the UnprocessedItems
+// response parameter. You can investigate and optionally resend the requests.
+// Typically, you would call BatchWriteItem in a loop. Each iteration would
+// check for unprocessed items and submit a new BatchWriteItem request with
+// those unprocessed items until all items have been processed.
+//
+// Note that if none of the items can be processed due to insufficient provisioned
+// throughput on all of the tables in the request, then BatchWriteItem will
+// return a ProvisionedThroughputExceededException.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// With BatchWriteItem, you can efficiently write or delete large amounts of
+// data, such as from Amazon Elastic MapReduce (EMR), or copy data from another
+// database into DynamoDB. In order to improve performance with these large-scale
+// operations, BatchWriteItem does not behave in the same way as individual
+// PutItem and DeleteItem calls would. For example, you cannot specify conditions
+// on individual put and delete requests, and BatchWriteItem does not return
+// deleted items in the response.
+//
+// If you use a programming language that supports concurrency, you can use
+// threads to write items in parallel. Your application must include the necessary
+// logic to manage the threads. With languages that don't support threading,
+// you must update or delete the specified items one at a time. In both situations,
+// BatchWriteItem performs the specified put and delete operations in parallel,
+// giving you the power of the thread pool approach without having to introduce
+// complexity into your application.
+//
+// Parallel processing reduces latency, but each specified put and delete request
+// consumes the same number of write capacity units whether it is processed
+// in parallel or not. Delete operations on nonexistent items consume one write
+// capacity unit.
+//
+// If one or more of the following is true, DynamoDB rejects the entire batch
+// write operation:
+//
+// * One or more tables specified in the BatchWriteItem request does not
+// exist.
+//
+// * Primary key attributes specified on an item in the request do not match
+// those in the corresponding table's primary key schema.
+//
+// * You try to perform multiple operations on the same item in the same
+// BatchWriteItem request. For example, you cannot put and delete the same
+// item in the same BatchWriteItem request.
+//
+// * Your request contains at least two items with identical hash and range
+// keys (which essentially is two put operations).
+//
+// * There are more than 25 requests in the batch.
+//
+// * Any individual item in a batch exceeds 400 KB.
+//
+// * The total request size exceeds 16 MB.
+//
+// // Example sending a request using the BatchWriteItemRequest method.
+// req := client.BatchWriteItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
+func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) BatchWriteItemRequest {
+ op := &aws.Operation{
+ Name: opBatchWriteItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchWriteItemInput{}
+ }
+
+ output := &BatchWriteItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return BatchWriteItemRequest{Request: req, Input: input, Copy: c.BatchWriteItemRequest}
+}
+
+const opCreateBackup = "CreateBackup"
+
+// CreateBackupRequest is a API request type for the CreateBackup API operation.
+type CreateBackupRequest struct {
+ *aws.Request
+ Input *CreateBackupInput
+ Copy func(*CreateBackupInput) CreateBackupRequest
+}
+
+// Send marshals and sends the CreateBackup API request.
+func (r CreateBackupRequest) Send() (*CreateBackupOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*CreateBackupOutput), nil
+}
+
+// CreateBackupRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Creates a backup for an existing table.
+//
+// Each time you create an On-Demand Backup, the entire table data is backed
+// up. There is no limit to the number of on-demand backups that can be taken.
+//
+// When you create an On-Demand Backup, a time marker of the request is cataloged,
+// and the backup is created asynchronously, by applying all changes until the
+// time of the request to the last full table snapshot. Backup requests are
+// processed instantaneously and become available for restore within minutes.
+//
+// You can call CreateBackup at a maximum rate of 50 times per second.
+//
+// All backups in DynamoDB work without consuming any provisioned throughput
+// on the table.
+//
+// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed
+// to contain all data committed to the table up to 14:24:00, and data committed
+// after 14:26:00 will not be. The backup may or may not contain data modifications
+// made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal
+// consistency.
+//
+// Along with data, the following are also included on the backups:
+//
+// * Global secondary indexes (GSIs)
+//
+// * Local secondary indexes (LSIs)
+//
+// * Streams
+//
+// * Provisioned read and write capacity
+//
+// // Example sending a request using the CreateBackupRequest method.
+// req := client.CreateBackupRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
+func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) CreateBackupRequest {
+ op := &aws.Operation{
+ Name: opCreateBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateBackupInput{}
+ }
+
+ output := &CreateBackupOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return CreateBackupRequest{Request: req, Input: input, Copy: c.CreateBackupRequest}
+}
+
+const opCreateGlobalTable = "CreateGlobalTable"
+
+// CreateGlobalTableRequest is a API request type for the CreateGlobalTable API operation.
+type CreateGlobalTableRequest struct {
+ *aws.Request
+ Input *CreateGlobalTableInput
+ Copy func(*CreateGlobalTableInput) CreateGlobalTableRequest
+}
+
+// Send marshals and sends the CreateGlobalTable API request.
+func (r CreateGlobalTableRequest) Send() (*CreateGlobalTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*CreateGlobalTableOutput), nil
+}
+
+// CreateGlobalTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Creates a global table from an existing table. A global table creates a replication
+// relationship between two or more DynamoDB tables with the same table name
+// in the provided regions.
+//
+// If you want to add a new replica table to a global table, each of the following
+// conditions must be true:
+//
+// * The table must have the same primary key as all of the other replicas.
+//
+// * The table must have the same name as all of the other replicas.
+//
+// * The table must have DynamoDB Streams enabled, with the stream containing
+// both the new and the old images of the item.
+//
+// * None of the replica tables in the global table can contain any data.
+//
+// If global secondary indexes are specified, then the following conditions
+// must also be met:
+//
+// * The global secondary indexes must have the same name.
+//
+// * The global secondary indexes must have the same hash key and sort key
+// (if present).
+//
+// Write capacity settings should be set consistently across your replica tables
+// and secondary indexes. DynamoDB strongly recommends enabling auto scaling
+// to manage the write capacity settings for all of your global tables replicas
+// and indexes.
+//
+// If you prefer to manage write capacity settings manually, you should provision
+// equal replicated write capacity units to your replica tables. You should
+// also provision equal replicated write capacity units to matching secondary
+// indexes across your global table.
+//
+// // Example sending a request using the CreateGlobalTableRequest method.
+// req := client.CreateGlobalTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
+func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) CreateGlobalTableRequest {
+ op := &aws.Operation{
+ Name: opCreateGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateGlobalTableInput{}
+ }
+
+ output := &CreateGlobalTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return CreateGlobalTableRequest{Request: req, Input: input, Copy: c.CreateGlobalTableRequest}
+}
+
+const opCreateTable = "CreateTable"
+
+// CreateTableRequest is a API request type for the CreateTable API operation.
+type CreateTableRequest struct {
+ *aws.Request
+ Input *CreateTableInput
+ Copy func(*CreateTableInput) CreateTableRequest
+}
+
+// Send marshals and sends the CreateTable API request.
+func (r CreateTableRequest) Send() (*CreateTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*CreateTableOutput), nil
+}
+
+// CreateTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The CreateTable operation adds a new table to your account. In an AWS account,
+// table names must be unique within each region. That is, you can have two
+// tables with same name if you create the tables in different regions.
+//
+// CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
+// DynamoDB immediately returns a response with a TableStatus of CREATING. After
+// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform
+// read and write operations only on an ACTIVE table.
+//
+// You can optionally define secondary indexes on the new table, as part of
+// the CreateTable operation. If you want to create multiple tables with secondary
+// indexes on them, you must create the tables sequentially. Only one table
+// with secondary indexes can be in the CREATING state at any given time.
+//
+// You can use the DescribeTable action to check the table status.
+//
+// // Example sending a request using the CreateTableRequest method.
+// req := client.CreateTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable
+func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) CreateTableRequest {
+ op := &aws.Operation{
+ Name: opCreateTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateTableInput{}
+ }
+
+ output := &CreateTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return CreateTableRequest{Request: req, Input: input, Copy: c.CreateTableRequest}
+}
+
+const opDeleteBackup = "DeleteBackup"
+
+// DeleteBackupRequest is a API request type for the DeleteBackup API operation.
+type DeleteBackupRequest struct {
+ *aws.Request
+ Input *DeleteBackupInput
+ Copy func(*DeleteBackupInput) DeleteBackupRequest
+}
+
+// Send marshals and sends the DeleteBackup API request.
+func (r DeleteBackupRequest) Send() (*DeleteBackupOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DeleteBackupOutput), nil
+}
+
+// DeleteBackupRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Deletes an existing backup of a table.
+//
+// You can call DeleteBackup at a maximum rate of 10 times per second.
+//
+// // Example sending a request using the DeleteBackupRequest method.
+// req := client.DeleteBackupRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
+func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) DeleteBackupRequest {
+ op := &aws.Operation{
+ Name: opDeleteBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteBackupInput{}
+ }
+
+ output := &DeleteBackupOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DeleteBackupRequest{Request: req, Input: input, Copy: c.DeleteBackupRequest}
+}
+
+const opDeleteItem = "DeleteItem"
+
+// DeleteItemRequest is a API request type for the DeleteItem API operation.
+type DeleteItemRequest struct {
+ *aws.Request
+ Input *DeleteItemInput
+ Copy func(*DeleteItemInput) DeleteItemRequest
+}
+
+// Send marshals and sends the DeleteItem API request.
+func (r DeleteItemRequest) Send() (*DeleteItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DeleteItemOutput), nil
+}
+
+// DeleteItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Deletes a single item in a table by primary key. You can perform a conditional
+// delete operation that deletes the item if it exists, or if it has an expected
+// attribute value.
+//
+// In addition to deleting an item, you can also return the item's attribute
+// values in the same operation, using the ReturnValues parameter.
+//
+// Unless you specify conditions, the DeleteItem is an idempotent operation;
+// running it multiple times on the same item or attribute does not result in
+// an error response.
+//
+// Conditional deletes are useful for deleting items only if specific conditions
+// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
+// the item is not deleted.
+//
+// // Example sending a request using the DeleteItemRequest method.
+// req := client.DeleteItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem
+func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) DeleteItemRequest {
+ op := &aws.Operation{
+ Name: opDeleteItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteItemInput{}
+ }
+
+ output := &DeleteItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DeleteItemRequest{Request: req, Input: input, Copy: c.DeleteItemRequest}
+}
+
+const opDeleteTable = "DeleteTable"
+
+// DeleteTableRequest is a API request type for the DeleteTable API operation.
+type DeleteTableRequest struct {
+ *aws.Request
+ Input *DeleteTableInput
+ Copy func(*DeleteTableInput) DeleteTableRequest
+}
+
+// Send marshals and sends the DeleteTable API request.
+func (r DeleteTableRequest) Send() (*DeleteTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DeleteTableOutput), nil
+}
+
+// DeleteTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
+// request, the specified table is in the DELETING state until DynamoDB completes
+// the deletion. If the table is in the ACTIVE state, you can delete it. If
+// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException.
+// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
+// If table is already in the DELETING state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such as
+// GetItem and PutItem, on a table in the DELETING state until the table deletion
+// is complete.
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is automatically
+// deleted after 24 hours.
+//
+// Use the DescribeTable action to check the status of the table.
+//
+// // Example sending a request using the DeleteTableRequest method.
+// req := client.DeleteTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
+func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) DeleteTableRequest {
+ op := &aws.Operation{
+ Name: opDeleteTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteTableInput{}
+ }
+
+ output := &DeleteTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DeleteTableRequest{Request: req, Input: input, Copy: c.DeleteTableRequest}
+}
+
+const opDescribeBackup = "DescribeBackup"
+
+// DescribeBackupRequest is a API request type for the DescribeBackup API operation.
+type DescribeBackupRequest struct {
+ *aws.Request
+ Input *DescribeBackupInput
+ Copy func(*DescribeBackupInput) DescribeBackupRequest
+}
+
+// Send marshals and sends the DescribeBackup API request.
+func (r DescribeBackupRequest) Send() (*DescribeBackupOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeBackupOutput), nil
+}
+
+// DescribeBackupRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Describes an existing backup of a table.
+//
+// You can call DescribeBackup at a maximum rate of 10 times per second.
+//
+// // Example sending a request using the DescribeBackupRequest method.
+// req := client.DescribeBackupRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
+func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) DescribeBackupRequest {
+ op := &aws.Operation{
+ Name: opDescribeBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeBackupInput{}
+ }
+
+ output := &DescribeBackupOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeBackupRequest{Request: req, Input: input, Copy: c.DescribeBackupRequest}
+}
+
+const opDescribeContinuousBackups = "DescribeContinuousBackups"
+
+// DescribeContinuousBackupsRequest is a API request type for the DescribeContinuousBackups API operation.
+type DescribeContinuousBackupsRequest struct {
+ *aws.Request
+ Input *DescribeContinuousBackupsInput
+ Copy func(*DescribeContinuousBackupsInput) DescribeContinuousBackupsRequest
+}
+
+// Send marshals and sends the DescribeContinuousBackups API request.
+func (r DescribeContinuousBackupsRequest) Send() (*DescribeContinuousBackupsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeContinuousBackupsOutput), nil
+}
+
+// DescribeContinuousBackupsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Checks the status of continuous backups and point in time recovery on the
+// specified table. Continuous backups are ENABLED on all tables at table creation.
+// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set
+// to ENABLED.
+//
+// Once continuous backups and point in time recovery are enabled, you can restore
+// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time.
+// You can restore your table to any point in time during the last 35 days.
+//
+// You can call DescribeContinuousBackups at a maximum rate of 10 times per
+// second.
+//
+// // Example sending a request using the DescribeContinuousBackupsRequest method.
+// req := client.DescribeContinuousBackupsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups
+func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) DescribeContinuousBackupsRequest {
+ op := &aws.Operation{
+ Name: opDescribeContinuousBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeContinuousBackupsInput{}
+ }
+
+ output := &DescribeContinuousBackupsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeContinuousBackupsRequest{Request: req, Input: input, Copy: c.DescribeContinuousBackupsRequest}
+}
+
+const opDescribeEndpoints = "DescribeEndpoints"
+
+// DescribeEndpointsRequest is a API request type for the DescribeEndpoints API operation.
+type DescribeEndpointsRequest struct {
+ *aws.Request
+ Input *DescribeEndpointsInput
+ Copy func(*DescribeEndpointsInput) DescribeEndpointsRequest
+}
+
+// Send marshals and sends the DescribeEndpoints API request.
+func (r DescribeEndpointsRequest) Send() (*DescribeEndpointsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeEndpointsOutput), nil
+}
+
+// DescribeEndpointsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// // Example sending a request using the DescribeEndpointsRequest method.
+// req := client.DescribeEndpointsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints
+func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) DescribeEndpointsRequest {
+ op := &aws.Operation{
+ Name: opDescribeEndpoints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeEndpointsInput{}
+ }
+
+ output := &DescribeEndpointsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeEndpointsRequest{Request: req, Input: input, Copy: c.DescribeEndpointsRequest}
+}
+
+const opDescribeGlobalTable = "DescribeGlobalTable"
+
+// DescribeGlobalTableRequest is a API request type for the DescribeGlobalTable API operation.
+type DescribeGlobalTableRequest struct {
+ *aws.Request
+ Input *DescribeGlobalTableInput
+ Copy func(*DescribeGlobalTableInput) DescribeGlobalTableRequest
+}
+
+// Send marshals and sends the DescribeGlobalTable API request.
+func (r DescribeGlobalTableRequest) Send() (*DescribeGlobalTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeGlobalTableOutput), nil
+}
+
+// DescribeGlobalTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Returns information about the specified global table.
+//
+// // Example sending a request using the DescribeGlobalTableRequest method.
+// req := client.DescribeGlobalTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable
+func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) DescribeGlobalTableRequest {
+ op := &aws.Operation{
+ Name: opDescribeGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeGlobalTableInput{}
+ }
+
+ output := &DescribeGlobalTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeGlobalTableRequest{Request: req, Input: input, Copy: c.DescribeGlobalTableRequest}
+}
+
+const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings"
+
+// DescribeGlobalTableSettingsRequest is a API request type for the DescribeGlobalTableSettings API operation.
+type DescribeGlobalTableSettingsRequest struct {
+ *aws.Request
+ Input *DescribeGlobalTableSettingsInput
+ Copy func(*DescribeGlobalTableSettingsInput) DescribeGlobalTableSettingsRequest
+}
+
+// Send marshals and sends the DescribeGlobalTableSettings API request.
+func (r DescribeGlobalTableSettingsRequest) Send() (*DescribeGlobalTableSettingsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeGlobalTableSettingsOutput), nil
+}
+
+// DescribeGlobalTableSettingsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Describes region specific settings for a global table.
+//
+// // Example sending a request using the DescribeGlobalTableSettingsRequest method.
+// req := client.DescribeGlobalTableSettingsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings
+func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) DescribeGlobalTableSettingsRequest {
+ op := &aws.Operation{
+ Name: opDescribeGlobalTableSettings,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeGlobalTableSettingsInput{}
+ }
+
+ output := &DescribeGlobalTableSettingsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeGlobalTableSettingsRequest{Request: req, Input: input, Copy: c.DescribeGlobalTableSettingsRequest}
+}
+
+const opDescribeLimits = "DescribeLimits"
+
+// DescribeLimitsRequest is a API request type for the DescribeLimits API operation.
+type DescribeLimitsRequest struct {
+ *aws.Request
+ Input *DescribeLimitsInput
+ Copy func(*DescribeLimitsInput) DescribeLimitsRequest
+}
+
+// Send marshals and sends the DescribeLimits API request.
+func (r DescribeLimitsRequest) Send() (*DescribeLimitsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeLimitsOutput), nil
+}
+
+// DescribeLimitsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Returns the current provisioned-capacity limits for your AWS account in a
+// region, both for the region as a whole and for any one DynamoDB table that
+// you create there.
+//
+// When you establish an AWS account, the account has initial limits on the
+// maximum read capacity units and write capacity units that you can provision
+// across all of your DynamoDB tables in a given region. Also, there are per-table
+// limits that apply when you create a table there. For more information, see
+// Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+// page in the Amazon DynamoDB Developer Guide.
+//
+// Although you can increase these limits by filing a case at AWS Support Center
+// (https://console.aws.amazon.com/support/home#/), obtaining the increase is
+// not instantaneous. The DescribeLimits action lets you write code to compare
+// the capacity you are currently using to those limits imposed by your account
+// so that you have enough time to apply for an increase before you hit a limit.
+//
+// For example, you could use one of the AWS SDKs to do the following:
+//
+// Call DescribeLimits for a particular region to obtain your current account
+// limits on provisioned capacity there.
+//
+// Create a variable to hold the aggregate read capacity units provisioned for
+// all your tables in that region, and one to hold the aggregate write capacity
+// units. Zero them both.
+//
+// Call ListTables to obtain a list of all your DynamoDB tables.
+//
+// For each table name listed by ListTables, do the following:
+//
+// Call DescribeTable with the table name.
+//
+// Use the data returned by DescribeTable to add the read capacity units and
+// write capacity units provisioned for the table itself to your variables.
+//
+// If the table has one or more global secondary indexes (GSIs), loop over these
+// GSIs and add their provisioned capacity values to your variables as well.
+//
+// Report the account limits for that region returned by DescribeLimits, along
+// with the total current provisioned capacity levels you have calculated.
+//
+// This will let you see whether you are getting close to your account-level
+// limits.
+//
+// The per-table limits apply only when you are creating a new table. They restrict
+// the sum of the provisioned capacity of the new table itself and all its global
+// secondary indexes.
+//
+// For existing tables and their GSIs, DynamoDB will not let you increase provisioned
+// capacity extremely rapidly, but the only upper limit that applies is that
+// the aggregate provisioned capacity over all your tables and GSIs cannot exceed
+// either of the per-account limits.
+//
+// DescribeLimits should only be called periodically. You can expect throttling
+// errors if you call it more than once in a minute.
+//
+// The DescribeLimits Request element has no content.
+//
+// // Example sending a request using the DescribeLimitsRequest method.
+// req := client.DescribeLimitsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits
+func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) DescribeLimitsRequest {
+ op := &aws.Operation{
+ Name: opDescribeLimits,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeLimitsInput{}
+ }
+
+ output := &DescribeLimitsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeLimitsRequest{Request: req, Input: input, Copy: c.DescribeLimitsRequest}
+}
+
+const opDescribeTable = "DescribeTable"
+
+// DescribeTableRequest is a API request type for the DescribeTable API operation.
+type DescribeTableRequest struct {
+ *aws.Request
+ Input *DescribeTableInput
+ Copy func(*DescribeTableInput) DescribeTableRequest
+}
+
+// Send marshals and sends the DescribeTable API request.
+func (r DescribeTableRequest) Send() (*DescribeTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeTableOutput), nil
+}
+
+// DescribeTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Returns information about the table, including the current status of the
+// table, when it was created, the primary key schema, and any indexes on the
+// table.
+//
+// If you issue a DescribeTable request immediately after a CreateTable request,
+// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable
+// uses an eventually consistent query, and the metadata for your table might
+// not be available at that moment. Wait for a few seconds, and then try the
+// DescribeTable request again.
+//
+// // Example sending a request using the DescribeTableRequest method.
+// req := client.DescribeTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable
+func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) DescribeTableRequest {
+ op := &aws.Operation{
+ Name: opDescribeTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeTableInput{}
+ }
+
+ output := &DescribeTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeTableRequest{Request: req, Input: input, Copy: c.DescribeTableRequest}
+}
+
+const opDescribeTimeToLive = "DescribeTimeToLive"
+
+// DescribeTimeToLiveRequest is a API request type for the DescribeTimeToLive API operation.
+type DescribeTimeToLiveRequest struct {
+ *aws.Request
+ Input *DescribeTimeToLiveInput
+ Copy func(*DescribeTimeToLiveInput) DescribeTimeToLiveRequest
+}
+
+// Send marshals and sends the DescribeTimeToLive API request.
+func (r DescribeTimeToLiveRequest) Send() (*DescribeTimeToLiveOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DescribeTimeToLiveOutput), nil
+}
+
+// DescribeTimeToLiveRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Gives a description of the Time to Live (TTL) status on the specified table.
+//
+// // Example sending a request using the DescribeTimeToLiveRequest method.
+// req := client.DescribeTimeToLiveRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive
+func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) DescribeTimeToLiveRequest {
+ op := &aws.Operation{
+ Name: opDescribeTimeToLive,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeTimeToLiveInput{}
+ }
+
+ output := &DescribeTimeToLiveOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DescribeTimeToLiveRequest{Request: req, Input: input, Copy: c.DescribeTimeToLiveRequest}
+}
+
+const opGetItem = "GetItem"
+
+// GetItemRequest is a API request type for the GetItem API operation.
+type GetItemRequest struct {
+ *aws.Request
+ Input *GetItemInput
+ Copy func(*GetItemInput) GetItemRequest
+}
+
+// Send marshals and sends the GetItem API request.
+func (r GetItemRequest) Send() (*GetItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*GetItemOutput), nil
+}
+
+// GetItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The GetItem operation returns a set of attributes for the item with the given
+// primary key. If there is no matching item, GetItem does not return any data
+// and there will be no Item element in the response.
+//
+// GetItem provides an eventually consistent read by default. If your application
+// requires a strongly consistent read, set ConsistentRead to true. Although
+// a strongly consistent read might take more time than an eventually consistent
+// read, it always returns the last updated value.
+//
+// // Example sending a request using the GetItemRequest method.
+// req := client.GetItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
+func (c *DynamoDB) GetItemRequest(input *GetItemInput) GetItemRequest {
+ op := &aws.Operation{
+ Name: opGetItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetItemInput{}
+ }
+
+ output := &GetItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return GetItemRequest{Request: req, Input: input, Copy: c.GetItemRequest}
+}
+
+const opListBackups = "ListBackups"
+
+// ListBackupsRequest is a API request type for the ListBackups API operation.
+type ListBackupsRequest struct {
+ *aws.Request
+ Input *ListBackupsInput
+ Copy func(*ListBackupsInput) ListBackupsRequest
+}
+
+// Send marshals and sends the ListBackups API request.
+func (r ListBackupsRequest) Send() (*ListBackupsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*ListBackupsOutput), nil
+}
+
+// ListBackupsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// List backups associated with an AWS account. To list backups for a given
+// table, specify TableName. ListBackups returns a paginated list of results
+// with at most 1MB worth of items in a page. You can also specify a limit for
+// the maximum number of entries to be returned in a page.
+//
+// In the request, start time is inclusive but end time is exclusive. Note that
+// these limits are for the time at which the original backup was requested.
+//
+// You can call ListBackups a maximum of 5 times per second.
+//
+// // Example sending a request using the ListBackupsRequest method.
+// req := client.ListBackupsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups
+func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) ListBackupsRequest {
+ op := &aws.Operation{
+ Name: opListBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBackupsInput{}
+ }
+
+ output := &ListBackupsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return ListBackupsRequest{Request: req, Input: input, Copy: c.ListBackupsRequest}
+}
+
+const opListGlobalTables = "ListGlobalTables"
+
+// ListGlobalTablesRequest is a API request type for the ListGlobalTables API operation.
+type ListGlobalTablesRequest struct {
+ *aws.Request
+ Input *ListGlobalTablesInput
+ Copy func(*ListGlobalTablesInput) ListGlobalTablesRequest
+}
+
+// Send marshals and sends the ListGlobalTables API request.
+func (r ListGlobalTablesRequest) Send() (*ListGlobalTablesOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*ListGlobalTablesOutput), nil
+}
+
+// ListGlobalTablesRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Lists all global tables that have a replica in the specified region.
+//
+// // Example sending a request using the ListGlobalTablesRequest method.
+// req := client.ListGlobalTablesRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables
+func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) ListGlobalTablesRequest {
+ op := &aws.Operation{
+ Name: opListGlobalTables,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListGlobalTablesInput{}
+ }
+
+ output := &ListGlobalTablesOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return ListGlobalTablesRequest{Request: req, Input: input, Copy: c.ListGlobalTablesRequest}
+}
+
+const opListTables = "ListTables"
+
+// ListTablesRequest is a API request type for the ListTables API operation.
+type ListTablesRequest struct {
+ *aws.Request
+ Input *ListTablesInput
+ Copy func(*ListTablesInput) ListTablesRequest
+}
+
+// Send marshals and sends the ListTables API request.
+func (r ListTablesRequest) Send() (*ListTablesOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*ListTablesOutput), nil
+}
+
+// ListTablesRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Returns an array of table names associated with the current account and endpoint.
+// The output from ListTables is paginated, with each page returning a maximum
+// of 100 table names.
+//
+// // Example sending a request using the ListTablesRequest method.
+// req := client.ListTablesRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables
+func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) ListTablesRequest {
+ op := &aws.Operation{
+ Name: opListTables,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"ExclusiveStartTableName"},
+ OutputTokens: []string{"LastEvaluatedTableName"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListTablesInput{}
+ }
+
+ output := &ListTablesOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return ListTablesRequest{Request: req, Input: input, Copy: c.ListTablesRequest}
+}
+
+// Paginate pages iterates over the pages of a ListTablesRequest operation,
+// calling the Next method for each page. Using the paginators Next
+// method will depict whether or not there are more pages.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListTables operation.
+// req := client.ListTablesRequest(input)
+// p := req.Paginate()
+// for p.Next() {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func (p *ListTablesRequest) Paginate(opts ...aws.Option) ListTablesPager {
+ return ListTablesPager{
+ Pager: aws.Pager{
+ NewRequest: func() (*aws.Request, error) {
+ var inCpy *ListTablesInput
+ if p.Input != nil {
+ tmp := *p.Input
+ inCpy = &tmp
+ }
+
+ req := p.Copy(inCpy)
+ req.ApplyOptions(opts...)
+
+ return req.Request, nil
+ },
+ },
+ }
+}
+
+// ListTablesPager is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListTablesPager struct {
+ aws.Pager
+}
+
+func (p *ListTablesPager) CurrentPage() *ListTablesOutput {
+ return p.Pager.CurrentPage().(*ListTablesOutput)
+}
+
+const opListTagsOfResource = "ListTagsOfResource"
+
+// ListTagsOfResourceRequest is a API request type for the ListTagsOfResource API operation.
+type ListTagsOfResourceRequest struct {
+ *aws.Request
+ Input *ListTagsOfResourceInput
+ Copy func(*ListTagsOfResourceInput) ListTagsOfResourceRequest
+}
+
+// Send marshals and sends the ListTagsOfResource API request.
+func (r ListTagsOfResourceRequest) Send() (*ListTagsOfResourceOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*ListTagsOfResourceOutput), nil
+}
+
+// ListTagsOfResourceRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource
+// up to 10 times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the ListTagsOfResourceRequest method.
+// req := client.ListTagsOfResourceRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
+func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) ListTagsOfResourceRequest {
+ op := &aws.Operation{
+ Name: opListTagsOfResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListTagsOfResourceInput{}
+ }
+
+ output := &ListTagsOfResourceOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return ListTagsOfResourceRequest{Request: req, Input: input, Copy: c.ListTagsOfResourceRequest}
+}
+
+const opPutItem = "PutItem"
+
+// PutItemRequest is a API request type for the PutItem API operation.
+type PutItemRequest struct {
+ *aws.Request
+ Input *PutItemInput
+ Copy func(*PutItemInput) PutItemRequest
+}
+
+// Send marshals and sends the PutItem API request.
+func (r PutItemRequest) Send() (*PutItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*PutItemOutput), nil
+}
+
+// PutItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Creates a new item, or replaces an old item with a new item. If an item that
+// has the same primary key as the new item already exists in the specified
+// table, the new item completely replaces the existing item. You can perform
+// a conditional put operation (add a new item if one with the specified primary
+// key doesn't exist), or replace an existing item if it has certain attribute
+// values. You can return the item's attribute values in the same operation,
+// using the ReturnValues parameter.
+//
+// This topic provides general information about the PutItem API.
+//
+// For information on how to call the PutItem API using the AWS SDK in specific
+// languages, see the following:
+//
+// PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem)
+//
+// PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem)
+//
+// When you add an item, the primary key attribute(s) are the only required
+// attributes. Attribute values cannot be null. String and Binary type attributes
+// must have lengths greater than zero. Set type attributes cannot be empty.
+// Requests with empty values will be rejected with a ValidationException exception.
+//
+// To prevent a new item from replacing an existing item, use a conditional
+// expression that contains the attribute_not_exists function with the name
+// of the attribute being used as the partition key for the table. Since every
+// record must contain that attribute, the attribute_not_exists function will
+// only succeed if no matching item exists.
+//
+// For more information about PutItem, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the PutItemRequest method.
+// req := client.PutItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem
+func (c *DynamoDB) PutItemRequest(input *PutItemInput) PutItemRequest {
+ op := &aws.Operation{
+ Name: opPutItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutItemInput{}
+ }
+
+ output := &PutItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return PutItemRequest{Request: req, Input: input, Copy: c.PutItemRequest}
+}
+
+const opQuery = "Query"
+
+// QueryRequest is a API request type for the Query API operation.
+type QueryRequest struct {
+ *aws.Request
+ Input *QueryInput
+ Copy func(*QueryInput) QueryRequest
+}
+
+// Send marshals and sends the Query API request.
+func (r QueryRequest) Send() (*QueryOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*QueryOutput), nil
+}
+
+// QueryRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The Query operation finds items based on primary key values. You can query
+// any table or secondary index that has a composite primary key (a partition
+// key and a sort key).
+//
+// Use the KeyConditionExpression parameter to provide a specific value for
+// the partition key. The Query operation will return all of the items from
+// the table or index with that partition key value. You can optionally narrow
+// the scope of the Query operation by specifying a sort key value and a comparison
+// operator in KeyConditionExpression. To further refine the Query results,
+// you can optionally provide a FilterExpression. A FilterExpression determines
+// which items within the results should be returned to you. All of the other
+// results are discarded.
+//
+// A Query operation always returns a result set. If no matching items are found,
+// the result set will be empty. Queries that do not return results consume
+// the minimum number of read capacity units for that type of read operation.
+//
+// DynamoDB calculates the number of read capacity units consumed based on item
+// size, not on the amount of data that is returned to an application. The number
+// of capacity units consumed will be the same whether you request all of the
+// attributes (the default behavior) or just some of them (using a projection
+// expression). The number will also be the same whether or not you use a FilterExpression.
+//
+// Query results are always sorted by the sort key value. If the data type of
+// the sort key is Number, the results are returned in numeric order; otherwise,
+// the results are returned in order of UTF-8 bytes. By default, the sort order
+// is ascending. To reverse the order, set the ScanIndexForward parameter to
+// false.
+//
+// A single Query operation will read up to the maximum number of items set
+// (if using the Limit parameter) or a maximum of 1 MB of data and then apply
+// any filtering to the results using FilterExpression. If LastEvaluatedKey
+// is present in the response, you will need to paginate the result set. For
+// more information, see Paginating the Results (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination)
+// in the Amazon DynamoDB Developer Guide.
+//
+// FilterExpression is applied after a Query finishes, but before the results
+// are returned. A FilterExpression cannot contain partition key or sort key
+// attributes. You need to specify those attributes in the KeyConditionExpression.
+//
+// A Query operation can return an empty result set and a LastEvaluatedKey if
+// all the items read for the page of results are filtered out.
+//
+// You can query a table, a local secondary index, or a global secondary index.
+// For a query on a table or on a local secondary index, you can set the ConsistentRead
+// parameter to true and obtain a strongly consistent result. Global secondary
+// indexes support eventually consistent reads only, so do not specify ConsistentRead
+// when querying a global secondary index.
+//
+// // Example sending a request using the QueryRequest method.
+// req := client.QueryRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query
+func (c *DynamoDB) QueryRequest(input *QueryInput) QueryRequest {
+ op := &aws.Operation{
+ Name: opQuery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &QueryInput{}
+ }
+
+ output := &QueryOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return QueryRequest{Request: req, Input: input, Copy: c.QueryRequest}
+}
+
+// Paginate pages iterates over the pages of a QueryRequest operation,
+// calling the Next method for each page. Using the paginators Next
+// method will depict whether or not there are more pages.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a Query operation.
+// req := client.QueryRequest(input)
+// p := req.Paginate()
+// for p.Next() {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func (p *QueryRequest) Paginate(opts ...aws.Option) QueryPager {
+ return QueryPager{
+ Pager: aws.Pager{
+ NewRequest: func() (*aws.Request, error) {
+ var inCpy *QueryInput
+ if p.Input != nil {
+ tmp := *p.Input
+ inCpy = &tmp
+ }
+
+ req := p.Copy(inCpy)
+ req.ApplyOptions(opts...)
+
+ return req.Request, nil
+ },
+ },
+ }
+}
+
+// QueryPager is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type QueryPager struct {
+ aws.Pager
+}
+
+func (p *QueryPager) CurrentPage() *QueryOutput {
+ return p.Pager.CurrentPage().(*QueryOutput)
+}
+
+const opRestoreTableFromBackup = "RestoreTableFromBackup"
+
+// RestoreTableFromBackupRequest is a API request type for the RestoreTableFromBackup API operation.
+type RestoreTableFromBackupRequest struct {
+ *aws.Request
+ Input *RestoreTableFromBackupInput
+ Copy func(*RestoreTableFromBackupInput) RestoreTableFromBackupRequest
+}
+
+// Send marshals and sends the RestoreTableFromBackup API request.
+func (r RestoreTableFromBackupRequest) Send() (*RestoreTableFromBackupOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*RestoreTableFromBackupOutput), nil
+}
+
+// RestoreTableFromBackupRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Creates a new table from an existing backup. Any number of users can execute
+// up to 4 concurrent restores (any type of restore) in a given account.
+//
+// You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Cloudwatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// // Example sending a request using the RestoreTableFromBackupRequest method.
+// req := client.RestoreTableFromBackupRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
+func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) RestoreTableFromBackupRequest {
+ op := &aws.Operation{
+ Name: opRestoreTableFromBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RestoreTableFromBackupInput{}
+ }
+
+ output := &RestoreTableFromBackupOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return RestoreTableFromBackupRequest{Request: req, Input: input, Copy: c.RestoreTableFromBackupRequest}
+}
+
+const opRestoreTableToPointInTime = "RestoreTableToPointInTime"
+
+// RestoreTableToPointInTimeRequest is a API request type for the RestoreTableToPointInTime API operation.
+type RestoreTableToPointInTimeRequest struct {
+ *aws.Request
+ Input *RestoreTableToPointInTimeInput
+ Copy func(*RestoreTableToPointInTimeInput) RestoreTableToPointInTimeRequest
+}
+
+// Send marshals and sends the RestoreTableToPointInTime API request.
+func (r RestoreTableToPointInTimeRequest) Send() (*RestoreTableToPointInTimeOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*RestoreTableToPointInTimeOutput), nil
+}
+
+// RestoreTableToPointInTimeRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Restores the specified table to the specified point in time within EarliestRestorableDateTime
+// and LatestRestorableDateTime. You can restore your table to any point in
+// time during the last 35 days. Any number of users can execute up to 4 concurrent
+// restores (any type of restore) in a given account.
+//
+// When you restore using point in time recovery, DynamoDB restores your table
+// data to the state based on the selected date and time (day:hour:minute:second)
+// to a new table.
+//
+// Along with data, the following are also included on the new restored table
+// using point in time recovery:
+//
+// * Global secondary indexes (GSIs)
+//
+// * Local secondary indexes (LSIs)
+//
+// * Provisioned read and write capacity
+//
+// * Encryption settings
+//
+// All these settings come from the current settings of the source table at
+// the time of restore.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Cloudwatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// * Point in time recovery settings
+//
+// // Example sending a request using the RestoreTableToPointInTimeRequest method.
+// req := client.RestoreTableToPointInTimeRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
+func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) RestoreTableToPointInTimeRequest {
+ op := &aws.Operation{
+ Name: opRestoreTableToPointInTime,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RestoreTableToPointInTimeInput{}
+ }
+
+ output := &RestoreTableToPointInTimeOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return RestoreTableToPointInTimeRequest{Request: req, Input: input, Copy: c.RestoreTableToPointInTimeRequest}
+}
+
// opScan is the service API operation name for Scan.
const opScan = "Scan"

// ScanRequest is an API request type for the Scan API operation.
type ScanRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *ScanInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*ScanInput) ScanRequest
}
+
+// Send marshals and sends the Scan API request.
+func (r ScanRequest) Send() (*ScanOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*ScanOutput), nil
+}
+
+// ScanRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a FilterExpression operation.
+//
+// If the total number of scanned items exceeds the maximum data set size limit
+// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey
+// value to continue the scan in a subsequent operation. The results also include
+// the number of items exceeding the limit. A scan can result in no table data
+// meeting the filter criteria.
+//
+// A single Scan operation will read up to the maximum number of items set (if
+// using the Limit parameter) or a maximum of 1 MB of data and then apply any
+// filtering to the results using FilterExpression. If LastEvaluatedKey is present
+// in the response, you will need to paginate the result set. For more information,
+// see Paginating the Results (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Scan operations proceed sequentially; however, for faster performance on
+// a large table or secondary index, applications can request a parallel Scan
+// operation by providing the Segment and TotalSegments parameters. For more
+// information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Scan uses eventually consistent reads when accessing the data in a table;
+// therefore, the result set might not include the changes to data in the table
+// immediately before the operation began. If you need a consistent copy of
+// the data, as of the time that the Scan begins, you can set the ConsistentRead
+// parameter to true.
+//
+// // Example sending a request using the ScanRequest method.
+// req := client.ScanRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
+func (c *DynamoDB) ScanRequest(input *ScanInput) ScanRequest {
+ op := &aws.Operation{
+ Name: opScan,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ScanInput{}
+ }
+
+ output := &ScanOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return ScanRequest{Request: req, Input: input, Copy: c.ScanRequest}
+}
+
+// Paginate pages iterates over the pages of a ScanRequest operation,
+// calling the Next method for each page. Using the paginators Next
+// method will depict whether or not there are more pages.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a Scan operation.
+// req := client.ScanRequest(input)
+// p := req.Paginate()
+// for p.Next() {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func (p *ScanRequest) Paginate(opts ...aws.Option) ScanPager {
+ return ScanPager{
+ Pager: aws.Pager{
+ NewRequest: func() (*aws.Request, error) {
+ var inCpy *ScanInput
+ if p.Input != nil {
+ tmp := *p.Input
+ inCpy = &tmp
+ }
+
+ req := p.Copy(inCpy)
+ req.ApplyOptions(opts...)
+
+ return req.Request, nil
+ },
+ },
+ }
+}
+
// ScanPager is used to paginate the request. This can be done by
// calling Next and CurrentPage.
type ScanPager struct {
	// Embeds the generic pager, which drives request cloning and token advancement.
	aws.Pager
}
+
// CurrentPage returns the most recently retrieved page of Scan results.
// Only valid after a call to Next that returned true.
func (p *ScanPager) CurrentPage() *ScanOutput {
	return p.Pager.CurrentPage().(*ScanOutput)
}
+
// opTagResource is the service API operation name for TagResource.
const opTagResource = "TagResource"

// TagResourceRequest is an API request type for the TagResource API operation.
type TagResourceRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *TagResourceInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*TagResourceInput) TagResourceRequest
}
+
+// Send marshals and sends the TagResource API request.
+func (r TagResourceRequest) Send() (*TagResourceOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*TagResourceOutput), nil
+}
+
+// TagResourceRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Associate a set of tags with an Amazon DynamoDB resource. You can then activate
+// these user-defined tags so that they appear on the Billing and Cost Management
+// console for cost allocation tracking. You can call TagResource up to 5 times
+// per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the TagResourceRequest method.
+// req := client.TagResourceRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
+func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) TagResourceRequest {
+ op := &aws.Operation{
+ Name: opTagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagResourceInput{}
+ }
+
+ output := &TagResourceOutput{}
+ req := c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest}
+}
+
// opUntagResource is the service API operation name for UntagResource.
const opUntagResource = "UntagResource"

// UntagResourceRequest is an API request type for the UntagResource API operation.
type UntagResourceRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UntagResourceInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UntagResourceInput) UntagResourceRequest
}
+
+// Send marshals and sends the UntagResource API request.
+func (r UntagResourceRequest) Send() (*UntagResourceOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UntagResourceOutput), nil
+}
+
+// UntagResourceRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Removes the association of tags from an Amazon DynamoDB resource. You can
+// call UntagResource up to 5 times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req := client.UntagResourceRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
+func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest {
+ op := &aws.Operation{
+ Name: opUntagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UntagResourceInput{}
+ }
+
+ output := &UntagResourceOutput{}
+ req := c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest}
+}
+
// opUpdateContinuousBackups is the service API operation name for UpdateContinuousBackups.
const opUpdateContinuousBackups = "UpdateContinuousBackups"

// UpdateContinuousBackupsRequest is an API request type for the UpdateContinuousBackups API operation.
type UpdateContinuousBackupsRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateContinuousBackupsInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateContinuousBackupsInput) UpdateContinuousBackupsRequest
}
+
+// Send marshals and sends the UpdateContinuousBackups API request.
+func (r UpdateContinuousBackupsRequest) Send() (*UpdateContinuousBackupsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateContinuousBackupsOutput), nil
+}
+
+// UpdateContinuousBackupsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// UpdateContinuousBackups enables or disables point in time recovery for the
+// specified table. A successful UpdateContinuousBackups call returns the current
+// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables
+// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus
+// will be set to ENABLED.
+//
+// Once continuous backups and point in time recovery are enabled, you can restore
+// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time.
+// You can restore your table to any point in time during the last 35 days..
+//
+// // Example sending a request using the UpdateContinuousBackupsRequest method.
+// req := client.UpdateContinuousBackupsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
+func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) UpdateContinuousBackupsRequest {
+ op := &aws.Operation{
+ Name: opUpdateContinuousBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateContinuousBackupsInput{}
+ }
+
+ output := &UpdateContinuousBackupsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateContinuousBackupsRequest{Request: req, Input: input, Copy: c.UpdateContinuousBackupsRequest}
+}
+
// opUpdateGlobalTable is the service API operation name for UpdateGlobalTable.
const opUpdateGlobalTable = "UpdateGlobalTable"

// UpdateGlobalTableRequest is an API request type for the UpdateGlobalTable API operation.
type UpdateGlobalTableRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateGlobalTableInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateGlobalTableInput) UpdateGlobalTableRequest
}
+
+// Send marshals and sends the UpdateGlobalTable API request.
+func (r UpdateGlobalTableRequest) Send() (*UpdateGlobalTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateGlobalTableOutput), nil
+}
+
+// UpdateGlobalTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Adds or removes replicas in the specified global table. The global table
+// must already exist to be able to use this operation. Any replica to be added
+// must be empty, must have the same name as the global table, must have the
+// same key schema, and must have DynamoDB Streams enabled and must have same
+// provisioned and maximum write capacity units.
+//
+// Although you can use UpdateGlobalTable to add replicas and remove replicas
+// in a single request, for simplicity we recommend that you issue separate
+// requests for adding or removing replicas.
+//
+// If global secondary indexes are specified, then the following conditions
+// must also be met:
+//
+// * The global secondary indexes must have the same name.
+//
+// * The global secondary indexes must have the same hash key and sort key
+// (if present).
+//
+// * The global secondary indexes must have the same provisioned and maximum
+// write capacity units.
+//
+// // Example sending a request using the UpdateGlobalTableRequest method.
+// req := client.UpdateGlobalTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable
+func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) UpdateGlobalTableRequest {
+ op := &aws.Operation{
+ Name: opUpdateGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateGlobalTableInput{}
+ }
+
+ output := &UpdateGlobalTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateGlobalTableRequest{Request: req, Input: input, Copy: c.UpdateGlobalTableRequest}
+}
+
// opUpdateGlobalTableSettings is the service API operation name for UpdateGlobalTableSettings.
const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings"

// UpdateGlobalTableSettingsRequest is an API request type for the UpdateGlobalTableSettings API operation.
type UpdateGlobalTableSettingsRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateGlobalTableSettingsInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateGlobalTableSettingsInput) UpdateGlobalTableSettingsRequest
}
+
+// Send marshals and sends the UpdateGlobalTableSettings API request.
+func (r UpdateGlobalTableSettingsRequest) Send() (*UpdateGlobalTableSettingsOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateGlobalTableSettingsOutput), nil
+}
+
+// UpdateGlobalTableSettingsRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Updates settings for a global table.
+//
+// // Example sending a request using the UpdateGlobalTableSettingsRequest method.
+// req := client.UpdateGlobalTableSettingsRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
+func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) UpdateGlobalTableSettingsRequest {
+ op := &aws.Operation{
+ Name: opUpdateGlobalTableSettings,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateGlobalTableSettingsInput{}
+ }
+
+ output := &UpdateGlobalTableSettingsOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateGlobalTableSettingsRequest{Request: req, Input: input, Copy: c.UpdateGlobalTableSettingsRequest}
+}
+
// opUpdateItem is the service API operation name for UpdateItem.
const opUpdateItem = "UpdateItem"

// UpdateItemRequest is an API request type for the UpdateItem API operation.
type UpdateItemRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateItemInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateItemInput) UpdateItemRequest
}
+
+// Send marshals and sends the UpdateItem API request.
+func (r UpdateItemRequest) Send() (*UpdateItemOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateItemOutput), nil
+}
+
+// UpdateItemRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Edits an existing item's attributes, or adds a new item to the table if it
+// does not already exist. You can put, delete, or add attribute values. You
+// can also perform a conditional update on an existing item (insert a new attribute
+// name-value pair if it doesn't exist, or replace an existing name-value pair
+// if it has certain expected attribute values).
+//
+// You can also return the item's attribute values in the same UpdateItem operation
+// using the ReturnValues parameter.
+//
+// // Example sending a request using the UpdateItemRequest method.
+// req := client.UpdateItemRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
+func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) UpdateItemRequest {
+ op := &aws.Operation{
+ Name: opUpdateItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateItemInput{}
+ }
+
+ output := &UpdateItemOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateItemRequest{Request: req, Input: input, Copy: c.UpdateItemRequest}
+}
+
// opUpdateTable is the service API operation name for UpdateTable.
const opUpdateTable = "UpdateTable"

// UpdateTableRequest is an API request type for the UpdateTable API operation.
type UpdateTableRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateTableInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateTableInput) UpdateTableRequest
}
+
+// Send marshals and sends the UpdateTable API request.
+func (r UpdateTableRequest) Send() (*UpdateTableOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateTableOutput), nil
+}
+
+// UpdateTableRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// Modifies the provisioned throughput settings, global secondary indexes, or
+// DynamoDB Streams settings for a given table.
+//
+// You can only perform one of the following operations at once:
+//
+// * Modify the provisioned throughput settings of the table.
+//
+// * Enable or disable Streams on the table.
+//
+// * Remove a global secondary index from the table.
+//
+// * Create a new global secondary index on the table. Once the index begins
+// backfilling, you can use UpdateTable to perform other operations.
+//
+// UpdateTable is an asynchronous operation; while it is executing, the table
+// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot
+// issue another UpdateTable request. When the table returns to the ACTIVE state,
+// the UpdateTable operation is complete.
+//
+// // Example sending a request using the UpdateTableRequest method.
+// req := client.UpdateTableRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
+func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) UpdateTableRequest {
+ op := &aws.Operation{
+ Name: opUpdateTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTableInput{}
+ }
+
+ output := &UpdateTableOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateTableRequest{Request: req, Input: input, Copy: c.UpdateTableRequest}
+}
+
// opUpdateTimeToLive is the service API operation name for UpdateTimeToLive.
const opUpdateTimeToLive = "UpdateTimeToLive"

// UpdateTimeToLiveRequest is an API request type for the UpdateTimeToLive API operation.
type UpdateTimeToLiveRequest struct {
	*aws.Request
	// Input holds the operation's input parameters.
	Input *UpdateTimeToLiveInput
	// Copy builds a new request for the operation from the given input.
	Copy func(*UpdateTimeToLiveInput) UpdateTimeToLiveRequest
}
+
+// Send marshals and sends the UpdateTimeToLive API request.
+func (r UpdateTimeToLiveRequest) Send() (*UpdateTimeToLiveOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*UpdateTimeToLiveOutput), nil
+}
+
+// UpdateTimeToLiveRequest returns a request value for making API operation for
+// Amazon DynamoDB.
+//
+// The UpdateTimeToLive method will enable or disable TTL for the specified
+// table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification;
+// it may take up to one hour for the change to fully process. Any additional
+// UpdateTimeToLive calls for the same table during this one hour duration result
+// in a ValidationException.
+//
+// TTL compares the current time in epoch time format to the time stored in
+// the TTL attribute of an item. If the epoch time value stored in the attribute
+// is less than the current time, the item is marked as expired and subsequently
+// deleted.
+//
+// The epoch time format is the number of seconds elapsed since 12:00:00 AM
+// January 1st, 1970 UTC.
+//
+// DynamoDB deletes expired items on a best-effort basis to ensure availability
+// of throughput for other data operations.
+//
+// DynamoDB typically deletes expired items within two days of expiration. The
+// exact duration within which an item gets deleted after expiration is specific
+// to the nature of the workload. Items that have expired and not been deleted
+// will still show up in reads, queries, and scans.
+//
+// As items are deleted, they are removed from any Local Secondary Index and
+// Global Secondary Index immediately in the same eventually consistent way
+// as a standard delete operation.
+//
+// For more information, see Time To Live (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// // Example sending a request using the UpdateTimeToLiveRequest method.
+// req := client.UpdateTimeToLiveRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
+func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) UpdateTimeToLiveRequest {
+ op := &aws.Operation{
+ Name: opUpdateTimeToLive,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTimeToLiveInput{}
+ }
+
+ output := &UpdateTimeToLiveOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return UpdateTimeToLiveRequest{Request: req, Input: input, Copy: c.UpdateTimeToLiveRequest}
+}
+
// Represents an attribute for describing the key schema for the table and indexes.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AttributeDefinition
type AttributeDefinition struct {
	// Sentinel field used by the SDK's protocol marshalers; never set directly.
	_ struct{} `type:"structure"`

	// A name for the attribute.
	//
	// AttributeName is a required field
	AttributeName *string `min:"1" type:"string" required:"true"`

	// The data type for the attribute, where:
	//
	//    * S - the attribute is of type String
	//
	//    * N - the attribute is of type Number
	//
	//    * B - the attribute is of type Binary
	//
	// AttributeType is a required field
	AttributeType ScalarAttributeType `type:"string" required:"true" enum:"true"`
}
+
// String returns the string representation of the value, pretty-printed
// via awsutil.Prettify.
func (s AttributeDefinition) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AttributeDefinition) GoString() string {
	return s.String()
}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttributeDefinition) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AttributeDefinition"}
+
+ if s.AttributeName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributeName", 1))
+ }
+ if len(s.AttributeType) == 0 {
+ invalidParams.Add(aws.NewErrParamRequired("AttributeType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Represents the data for an attribute.
//
// Each attribute value is described as a name-value pair. The name is the data
// type, and the value is the data itself.
//
// For more information, see Data Types (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
// in the Amazon DynamoDB Developer Guide.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AttributeValue
type AttributeValue struct {
	// Sentinel field used by the SDK's protocol marshalers; never set directly.
	_ struct{} `type:"structure"`

	// An attribute of type Binary. For example:
	//
	// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
	//
	// B is automatically base64 encoded/decoded by the SDK.
	B []byte `type:"blob"`

	// An attribute of type Boolean. For example:
	//
	// "BOOL": true
	BOOL *bool `type:"boolean"`

	// An attribute of type Binary Set. For example:
	//
	// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
	BS [][]byte `type:"list"`

	// An attribute of type List. For example:
	//
	// "L": ["Cookies", "Coffee", 3.14159]
	L []AttributeValue `type:"list"`

	// An attribute of type Map. For example:
	//
	// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
	M map[string]AttributeValue `type:"map"`

	// An attribute of type Number. For example:
	//
	// "N": "123.45"
	//
	// Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
	// across languages and libraries. However, DynamoDB treats them as number type
	// attributes for mathematical operations.
	N *string `type:"string"`

	// An attribute of type Number Set. For example:
	//
	// "NS": ["42.2", "-19", "7.5", "3.14"]
	//
	// Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
	// across languages and libraries. However, DynamoDB treats them as number type
	// attributes for mathematical operations.
	NS []string `type:"list"`

	// An attribute of type Null. For example:
	//
	// "NULL": true
	NULL *bool `type:"boolean"`

	// An attribute of type String. For example:
	//
	// "S": "Hello"
	S *string `type:"string"`

	// An attribute of type String Set. For example:
	//
	// "SS": ["Giraffe", "Hippo" ,"Zebra"]
	SS []string `type:"list"`
}
+
// String returns the string representation of the value, pretty-printed
// via awsutil.Prettify.
func (s AttributeValue) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AttributeValue) GoString() string {
	return s.String()
}
+
// For the UpdateItem operation, represents the attributes to be modified, the
// action to perform on each, and the new value for each.
//
// You cannot use UpdateItem to update any primary key attributes. Instead,
// you will need to delete the item, and then use PutItem to create a new item
// with new attributes.
//
// Attribute values cannot be null; string and binary type attributes must have
// lengths greater than zero; and set type attributes must not be empty. Requests
// with empty values will be rejected with a ValidationException exception.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AttributeValueUpdate
type AttributeValueUpdate struct {
	// Sentinel field used by the SDK's protocol marshalers; never set directly.
	_ struct{} `type:"structure"`

	// Specifies how to perform the update. Valid values are PUT (default), DELETE,
	// and ADD. The behavior depends on whether the specified primary key already
	// exists in the table.
	//
	// If an item with the specified Key is found in the table:
	//
	//    * PUT - Adds the specified attribute to the item. If the attribute already
	//    exists, it is replaced by the new value.
	//
	//    * DELETE - If no value is specified, the attribute and its value are removed
	//    from the item. The data type of the specified value must match the existing
	//    value's data type.
	//
	//    If a set of values is specified, then those values are subtracted from the
	//    old set. For example, if the attribute value was the set [a,b,c] and the
	//    DELETE action specified [a,c], then the final attribute value would be
	//    [b]. Specifying an empty set is an error.
	//
	//    * ADD - If the attribute does not already exist, then the attribute and
	//    its values are added to the item. If the attribute does exist, then the
	//    behavior of ADD depends on the data type of the attribute:
	//
	//    If the existing attribute is a number, and if Value is also a number, then
	//    the Value is mathematically added to the existing attribute. If Value
	//    is a negative number, then it is subtracted from the existing attribute.
	//
	//    If you use ADD to increment or decrement a number value for an item that
	//    doesn't exist before the update, DynamoDB uses 0 as the initial value.
	//
	//    In addition, if you use ADD to update an existing item, and intend to increment
	//    or decrement an attribute value which does not yet exist, DynamoDB uses
	//    0 as the initial value. For example, suppose that the item you want to
	//    update does not yet have an attribute named itemcount, but you decide
	//    to ADD the number 3 to this attribute anyway, even though it currently
	//    does not exist. DynamoDB will create the itemcount attribute, set its
	//    initial value to 0, and finally add 3 to it. The result will be a new
	//    itemcount attribute in the item, with a value of 3.
	//
	//    If the existing data type is a set, and if the Value is also a set, then
	//    the Value is added to the existing set. (This is a set operation, not
	//    mathematical addition.) For example, if the attribute value was the set
	//    [1,2], and the ADD action specified [3], then the final attribute value
	//    would be [1,2,3]. An error occurs if an Add action is specified for a
	//    set attribute and the attribute type specified does not match the existing
	//    set type.
	//
	//    Both sets must have the same primitive data type. For example, if the existing
	//    data type is a set of strings, the Value must also be a set of strings.
	//    The same holds true for number sets and binary sets.
	//
	//    This action is only valid for an existing attribute whose data type is number
	//    or is a set. Do not use ADD for any other data types.
	//
	// If no item with the specified Key is found:
	//
	//    * PUT - DynamoDB creates a new item with the specified primary key, and
	//    then adds the attribute.
	//
	//    * DELETE - Nothing happens; there is no attribute to delete.
	//
	//    * ADD - DynamoDB creates an item with the supplied primary key and number
	//    (or set of numbers) for the attribute value. The only data types allowed
	//    are number and number set; no other data types can be specified.
	Action AttributeAction `type:"string" enum:"true"`

	// Represents the data for an attribute.
	//
	// Each attribute value is described as a name-value pair. The name is the data
	// type, and the value is the data itself.
	//
	// For more information, see Data Types (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
	// in the Amazon DynamoDB Developer Guide.
	Value *AttributeValue `type:"structure"`
}
+
+// String returns the string representation
+func (s AttributeValueUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeValueUpdate) GoString() string {
+ return s.String()
+}
+
// Represents the properties of the scaling policy.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingPolicyDescription
//
// NOTE(review): no field carries a required tag, so all fields may be nil in
// service responses.
type AutoScalingPolicyDescription struct {
	_ struct{} `type:"structure"`

	// The name of the scaling policy.
	// When present it is at least 1 character long (min tag).
	PolicyName *string `min:"1" type:"string"`

	// Represents a target tracking scaling policy configuration.
	TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"`
}
+
+// String returns the string representation
+func (s AutoScalingPolicyDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingPolicyDescription) GoString() string {
+ return s.String()
+}
+
// Represents the autoscaling policy to be modified.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingPolicyUpdate
type AutoScalingPolicyUpdate struct {
	_ struct{} `type:"structure"`

	// The name of the scaling policy.
	// Optional; when set it must be at least 1 character (enforced by Validate).
	PolicyName *string `min:"1" type:"string"`

	// Represents a target tracking scaling policy configuration.
	//
	// TargetTrackingScalingPolicyConfiguration is a required field
	TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"`
}
+
+// String returns the string representation
+func (s AutoScalingPolicyUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingPolicyUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingPolicyUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"}
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1))
+ }
+
+ if s.TargetTrackingScalingPolicyConfiguration == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration"))
+ }
+ if s.TargetTrackingScalingPolicyConfiguration != nil {
+ if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Represents the autoscaling settings for a global table or global secondary
// index.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingSettingsDescription
//
// NOTE(review): no field carries a required tag; any field may be absent in
// a response.
type AutoScalingSettingsDescription struct {
	_ struct{} `type:"structure"`

	// Disabled autoscaling for this global table or global secondary index.
	AutoScalingDisabled *bool `type:"boolean"`

	// Role ARN used for configuring autoScaling policy.
	AutoScalingRoleArn *string `type:"string"`

	// The maximum capacity units that a global table or global secondary index
	// should be scaled up to.
	MaximumUnits *int64 `min:"1" type:"long"`

	// The minimum capacity units that a global table or global secondary index
	// should be scaled down to.
	MinimumUnits *int64 `min:"1" type:"long"`

	// Information about the scaling policies.
	ScalingPolicies []AutoScalingPolicyDescription `type:"list"`
}
+
+// String returns the string representation
+func (s AutoScalingSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingSettingsDescription) GoString() string {
+ return s.String()
+}
+
// Represents the autoscaling settings to be modified for a global table or
// global secondary index.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingSettingsUpdate
type AutoScalingSettingsUpdate struct {
	_ struct{} `type:"structure"`

	// Disabled autoscaling for this global table or global secondary index.
	AutoScalingDisabled *bool `type:"boolean"`

	// Role ARN used for configuring autoscaling policy.
	// Unlike AutoScalingSettingsDescription, this field carries a min:"1"
	// constraint, enforced by Validate.
	AutoScalingRoleArn *string `min:"1" type:"string"`

	// The maximum capacity units that a global table or global secondary index
	// should be scaled up to. Must be >= 1 when set (enforced by Validate).
	MaximumUnits *int64 `min:"1" type:"long"`

	// The minimum capacity units that a global table or global secondary index
	// should be scaled down to. Must be >= 1 when set (enforced by Validate).
	MinimumUnits *int64 `min:"1" type:"long"`

	// The scaling policy to apply for scaling target global table or global secondary
	// index capacity units.
	ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"`
}
+
+// String returns the string representation
+func (s AutoScalingSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingSettingsUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"}
+ if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AutoScalingRoleArn", 1))
+ }
+ if s.MaximumUnits != nil && *s.MaximumUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("MaximumUnits", 1))
+ }
+ if s.MinimumUnits != nil && *s.MinimumUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("MinimumUnits", 1))
+ }
+ if s.ScalingPolicyUpdate != nil {
+ if err := s.ScalingPolicyUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ScalingPolicyUpdate", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Represents the properties of a target tracking scaling policy.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingTargetTrackingScalingPolicyConfigurationDescription
//
// NOTE(review): this shape mirrors
// AutoScalingTargetTrackingScalingPolicyConfigurationUpdate field-for-field;
// this variant appears in describe output.
type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct {
	_ struct{} `type:"structure"`

	// Indicates whether scale in by the target tracking policy is disabled. If
	// the value is true, scale in is disabled and the target tracking policy won't
	// remove capacity from the scalable resource. Otherwise, scale in is enabled
	// and the target tracking policy can remove capacity from the scalable resource.
	// The default value is false.
	DisableScaleIn *bool `type:"boolean"`

	// The amount of time, in seconds, after a scale in activity completes before
	// another scale in activity can start. The cooldown period is used to block
	// subsequent scale in requests until it has expired. You should scale in conservatively
	// to protect your application's availability. However, if another alarm triggers
	// a scale out policy during the cooldown period after a scale-in, application
	// autoscaling scales out your scalable target immediately.
	ScaleInCooldown *int64 `type:"integer"`

	// The amount of time, in seconds, after a scale out activity completes before
	// another scale out activity can start. While the cooldown period is in effect,
	// the capacity that has been added by the previous scale out event that initiated
	// the cooldown is calculated as part of the desired capacity for the next scale
	// out. You should continuously (but not excessively) scale out.
	ScaleOutCooldown *int64 `type:"integer"`

	// The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
	// (Base 10) or 2e-360 to 2e360 (Base 2).
	//
	// TargetValue is a required field
	TargetValue *float64 `type:"double" required:"true"`
}
+
+// String returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string {
+ return s.String()
+}
+
// Represents the settings of a target tracking scaling policy that will be
// modified.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/AutoScalingTargetTrackingScalingPolicyConfigurationUpdate
type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct {
	_ struct{} `type:"structure"`

	// Indicates whether scale in by the target tracking policy is disabled. If
	// the value is true, scale in is disabled and the target tracking policy won't
	// remove capacity from the scalable resource. Otherwise, scale in is enabled
	// and the target tracking policy can remove capacity from the scalable resource.
	// The default value is false.
	DisableScaleIn *bool `type:"boolean"`

	// The amount of time, in seconds, after a scale in activity completes before
	// another scale in activity can start. The cooldown period is used to block
	// subsequent scale in requests until it has expired. You should scale in conservatively
	// to protect your application's availability. However, if another alarm triggers
	// a scale out policy during the cooldown period after a scale-in, application
	// autoscaling scales out your scalable target immediately.
	ScaleInCooldown *int64 `type:"integer"`

	// The amount of time, in seconds, after a scale out activity completes before
	// another scale out activity can start. While the cooldown period is in effect,
	// the capacity that has been added by the previous scale out event that initiated
	// the cooldown is calculated as part of the desired capacity for the next scale
	// out. You should continuously (but not excessively) scale out.
	ScaleOutCooldown *int64 `type:"integer"`

	// The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
	// (Base 10) or 2e-360 to 2e360 (Base 2).
	//
	// TargetValue is a required field; Validate rejects the struct when it is nil.
	TargetValue *float64 `type:"double" required:"true"`
}
+
+// String returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"}
+
+ if s.TargetValue == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TargetValue"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Contains the description of the backup created for the table.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BackupDescription
//
// NOTE(review): all three sections are optional pointers; callers should
// nil-check each before use.
type BackupDescription struct {
	_ struct{} `type:"structure"`

	// Contains the details of the backup created for the table.
	BackupDetails *BackupDetails `type:"structure"`

	// Contains the details of the table when the backup was created.
	SourceTableDetails *SourceTableDetails `type:"structure"`

	// Contains the details of the features enabled on the table when the backup
	// was created. For example, LSIs, GSIs, streams, TTL.
	SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"`
}
+
+// String returns the string representation
+func (s BackupDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupDescription) GoString() string {
+ return s.String()
+}
+
// Contains the details of the backup created for the table.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BackupDetails
type BackupDetails struct {
	_ struct{} `type:"structure"`

	// ARN associated with the backup. At least 37 characters (min tag).
	//
	// BackupArn is a required field
	BackupArn *string `min:"37" type:"string" required:"true"`

	// Time at which the backup was created. This is the request time of the backup.
	//
	// BackupCreationDateTime is a required field
	BackupCreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`

	// Time at which the automatic on-demand backup created by DynamoDB will expire.
	// This SYSTEM on-demand backup expires automatically 35 days after its creation.
	BackupExpiryDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Name of the requested backup. At least 3 characters (min tag).
	//
	// BackupName is a required field
	BackupName *string `min:"3" type:"string" required:"true"`

	// Size of the backup in bytes.
	BackupSizeBytes *int64 `type:"long"`

	// Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
	//
	// BackupStatus is a required field
	BackupStatus BackupStatus `type:"string" required:"true" enum:"true"`

	// BackupType:
	//
	// * USER - On-demand backup created by you.
	//
	// * SYSTEM - On-demand backup automatically created by DynamoDB.
	//
	// BackupType is a required field
	BackupType BackupType `type:"string" required:"true" enum:"true"`
}
+
+// String returns the string representation
+func (s BackupDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupDetails) GoString() string {
+ return s.String()
+}
+
// Contains details for the backup.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BackupSummary
//
// NOTE(review): unlike BackupDetails, every field here is optional — this is
// the per-item shape returned by backup listings.
type BackupSummary struct {
	_ struct{} `type:"structure"`

	// ARN associated with the backup.
	BackupArn *string `min:"37" type:"string"`

	// Time at which the backup was created.
	BackupCreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Time at which the automatic on-demand backup created by DynamoDB will expire.
	// This SYSTEM on-demand backup expires automatically 35 days after its creation.
	BackupExpiryDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Name of the specified backup.
	BackupName *string `min:"3" type:"string"`

	// Size of the backup in bytes.
	BackupSizeBytes *int64 `type:"long"`

	// Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
	BackupStatus BackupStatus `type:"string" enum:"true"`

	// BackupType:
	//
	// * USER - On-demand backup created by you.
	//
	// * SYSTEM - On-demand backup automatically created by DynamoDB.
	BackupType BackupType `type:"string" enum:"true"`

	// ARN associated with the table.
	TableArn *string `type:"string"`

	// Unique identifier for the table.
	TableId *string `type:"string"`

	// Name of the table.
	TableName *string `min:"3" type:"string"`
}
+
+// String returns the string representation
+func (s BackupSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupSummary) GoString() string {
+ return s.String()
+}
+
// Represents the input of a BatchGetItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItemInput
type BatchGetItemInput struct {
	_ struct{} `type:"structure"`

	// A map of one or more table names and, for each table, a map that describes
	// one or more items to retrieve from that table. Each table name can be used
	// only once per BatchGetItem request.
	//
	// Each element in the map of items to retrieve consists of the following:
	//
	// * ConsistentRead - If true, a strongly consistent read is used; if false
	// (the default), an eventually consistent read is used.
	//
	// * ExpressionAttributeNames - One or more substitution tokens for attribute
	// names in the ProjectionExpression parameter. The following are some use
	// cases for using ExpressionAttributeNames:
	//
	// To access an attribute whose name conflicts with a DynamoDB reserved word.
	//
	// To create a placeholder for repeating occurrences of an attribute name in
	// an expression.
	//
	// To prevent special characters in an attribute name from being misinterpreted
	// in an expression.
	//
	// Use the # character in an expression to dereference an attribute name. For
	// example, consider the following attribute name:
	//
	// Percentile
	//
	// The name of this attribute conflicts with a reserved word, so it cannot be
	// used directly in an expression. (For the complete list of reserved words,
	// see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
	// in the Amazon DynamoDB Developer Guide). To work around this, you could
	// specify the following for ExpressionAttributeNames:
	//
	// {"#P":"Percentile"}
	//
	// You could then use this substitution in an expression, as in this example:
	//
	// #P = :val
	//
	// Tokens that begin with the : character are expression attribute values, which
	// are placeholders for the actual value at runtime.
	//
	// For more information on expression attribute names, see Accessing Item Attributes
	// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
	// in the Amazon DynamoDB Developer Guide.
	//
	// * Keys - An array of primary key attribute values that define specific
	// items in the table. For each primary key, you must provide all of the
	// key attributes. For example, with a simple primary key, you only need
	// to provide the partition key value. For a composite key, you must provide
	// both the partition key value and the sort key value.
	//
	// * ProjectionExpression - A string that identifies one or more attributes
	// to retrieve from the table. These attributes can include scalars, sets,
	// or elements of a JSON document. The attributes in the expression must
	// be separated by commas.
	//
	// If no attribute names are specified, then all attributes will be returned.
	// If any of the requested attributes are not found, they will not appear
	// in the result.
	//
	// For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
	// in the Amazon DynamoDB Developer Guide.
	//
	// * AttributesToGet - This is a legacy parameter. Use ProjectionExpression
	// instead. For more information, see AttributesToGet (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
	// in the Amazon DynamoDB Developer Guide.
	//
	// RequestItems is a required field. Validate rejects a nil or empty map and
	// recursively validates each KeysAndAttributes entry.
	RequestItems map[string]KeysAndAttributes `min:"1" type:"map" required:"true"`

	// Determines the level of detail about provisioned throughput consumption that
	// is returned in the response:
	//
	// * INDEXES - The response includes the aggregate ConsumedCapacity for the
	// operation, together with ConsumedCapacity for each table and secondary
	// index that was accessed.
	//
	// Note that some operations, such as GetItem and BatchGetItem, do not access
	// any indexes at all. In these cases, specifying INDEXES will only return
	// ConsumedCapacity information for table(s).
	//
	// * TOTAL - The response includes only the aggregate ConsumedCapacity for
	// the operation.
	//
	// * NONE - No ConsumedCapacity details are included in the response.
	ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
}
+
+// String returns the string representation
+func (s BatchGetItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetItemInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "BatchGetItemInput"}
+
+ if s.RequestItems == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("RequestItems", 1))
+ }
+ if s.RequestItems != nil {
+ for i, v := range s.RequestItems {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Represents the output of a BatchGetItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItemOutput
type BatchGetItemOutput struct {
	_ struct{} `type:"structure"`

	// Unexported; exposed via SDKResponseMetadata. Presumably populated by the
	// SDK's request machinery — not set by callers.
	responseMetadata aws.Response

	// The read capacity units consumed by the entire BatchGetItem operation.
	//
	// Each element consists of:
	//
	// * TableName - The table that consumed the provisioned throughput.
	//
	// * CapacityUnits - The total number of capacity units consumed.
	ConsumedCapacity []ConsumedCapacity `type:"list"`

	// A map of table name to a list of items. Each object in Responses consists
	// of a table name, along with a map of attribute data consisting of the data
	// type and attribute value.
	Responses map[string][]map[string]AttributeValue `type:"map"`

	// A map of tables and their respective keys that were not processed with the
	// current response. The UnprocessedKeys value is in the same form as RequestItems,
	// so the value can be provided directly to a subsequent BatchGetItem operation.
	// For more information, see RequestItems in the Request Parameters section.
	//
	// Each element consists of:
	//
	// * Keys - An array of primary key attribute values that define specific
	// items in the table.
	//
	// * ProjectionExpression - One or more attributes to be retrieved from the
	// table or index. By default, all attributes are returned. If a requested
	// attribute is not found, it does not appear in the result.
	//
	// * ConsistentRead - The consistency of a read operation. If set to true,
	// then a strongly consistent read is used; otherwise, an eventually consistent
	// read is used.
	//
	// If there are no unprocessed keys remaining, the response contains an empty
	// UnprocessedKeys map.
	UnprocessedKeys map[string]KeysAndAttributes `min:"1" type:"map"`
}
+
+// String returns the string representation
+func (s BatchGetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s BatchGetItemOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
// Represents the input of a BatchWriteItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItemInput
type BatchWriteItemInput struct {
	_ struct{} `type:"structure"`

	// A map of one or more table names and, for each table, a list of operations
	// to be performed (DeleteRequest or PutRequest). Each element in the map consists
	// of the following:
	//
	// * DeleteRequest - Perform a DeleteItem operation on the specified item.
	// The item to be deleted is identified by a Key subelement:
	//
	// Key - A map of primary key attribute values that uniquely identify the item.
	// Each entry in this map consists of an attribute name and an attribute
	// value. For each primary key, you must provide all of the key attributes.
	// For example, with a simple primary key, you only need to provide a value
	// for the partition key. For a composite primary key, you must provide values
	// for both the partition key and the sort key.
	//
	// * PutRequest - Perform a PutItem operation on the specified item. The
	// item to be put is identified by an Item subelement:
	//
	// Item - A map of attributes and their values. Each entry in this map consists
	// of an attribute name and an attribute value. Attribute values must not
	// be null; string and binary type attributes must have lengths greater than
	// zero; and set type attributes must not be empty. Requests that contain
	// empty values will be rejected with a ValidationException exception.
	//
	// If you specify any attributes that are part of an index key, then the data
	// types for those attributes must match those of the schema in the table's
	// attribute definition.
	//
	// RequestItems is a required field. Validate rejects a nil or empty map.
	RequestItems map[string][]WriteRequest `min:"1" type:"map" required:"true"`

	// Determines the level of detail about provisioned throughput consumption that
	// is returned in the response:
	//
	// * INDEXES - The response includes the aggregate ConsumedCapacity for the
	// operation, together with ConsumedCapacity for each table and secondary
	// index that was accessed.
	//
	// Note that some operations, such as GetItem and BatchGetItem, do not access
	// any indexes at all. In these cases, specifying INDEXES will only return
	// ConsumedCapacity information for table(s).
	//
	// * TOTAL - The response includes only the aggregate ConsumedCapacity for
	// the operation.
	//
	// * NONE - No ConsumedCapacity details are included in the response.
	ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`

	// Determines whether item collection metrics are returned. If set to SIZE,
	// the response includes statistics about item collections, if any, that were
	// modified during the operation are returned in the response. If set to NONE
	// (the default), no statistics are returned.
	ReturnItemCollectionMetrics ReturnItemCollectionMetrics `type:"string" enum:"true"`
}
+
+// String returns the string representation
+func (s BatchWriteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchWriteItemInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "BatchWriteItemInput"}
+
+ if s.RequestItems == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("RequestItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Represents the output of a BatchWriteItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItemOutput
type BatchWriteItemOutput struct {
	_ struct{} `type:"structure"`

	// Unexported; exposed via SDKResponseMetadata. Presumably populated by the
	// SDK's request machinery — not set by callers.
	responseMetadata aws.Response

	// The capacity units consumed by the entire BatchWriteItem operation.
	//
	// Each element consists of:
	//
	// * TableName - The table that consumed the provisioned throughput.
	//
	// * CapacityUnits - The total number of capacity units consumed.
	ConsumedCapacity []ConsumedCapacity `type:"list"`

	// A list of tables that were processed by BatchWriteItem and, for each table,
	// information about any item collections that were affected by individual DeleteItem
	// or PutItem operations.
	//
	// Each entry consists of the following subelements:
	//
	// * ItemCollectionKey - The partition key value of the item collection.
	// This is the same as the partition key value of the item.
	//
	// * SizeEstimateRangeGB - An estimate of item collection size, expressed
	// in GB. This is a two-element array containing a lower bound and an upper
	// bound for the estimate. The estimate includes the size of all the items
	// in the table, plus the size of all attributes projected into all of the
	// local secondary indexes on the table. Use this estimate to measure whether
	// a local secondary index is approaching its size limit.
	//
	// The estimate is subject to change over time; therefore, do not rely on the
	// precision or accuracy of the estimate.
	ItemCollectionMetrics map[string][]ItemCollectionMetrics `type:"map"`

	// A map of tables and requests against those tables that were not processed.
	// The UnprocessedItems value is in the same form as RequestItems, so you can
	// provide this value directly to a subsequent BatchWriteItem operation. (The
	// value's type matches BatchWriteItemInput.RequestItems; the original comment
	// said "BatchGetItem", which does not accept this shape.) For more
	// information, see RequestItems in the Request Parameters section.
	//
	// Each UnprocessedItems entry consists of a table name and, for that table,
	// a list of operations to perform (DeleteRequest or PutRequest).
	//
	// * DeleteRequest - Perform a DeleteItem operation on the specified item.
	// The item to be deleted is identified by a Key subelement:
	//
	// Key - A map of primary key attribute values that uniquely identify the item.
	// Each entry in this map consists of an attribute name and an attribute
	// value.
	//
	// * PutRequest - Perform a PutItem operation on the specified item. The
	// item to be put is identified by an Item subelement:
	//
	// Item - A map of attributes and their values. Each entry in this map consists
	// of an attribute name and an attribute value. Attribute values must not
	// be null; string and binary type attributes must have lengths greater than
	// zero; and set type attributes must not be empty. Requests that contain
	// empty values will be rejected with a ValidationException exception.
	//
	// If you specify any attributes that are part of an index key, then the data
	// types for those attributes must match those of the schema in the table's
	// attribute definition.
	//
	// If there are no unprocessed items remaining, the response contains an empty
	// UnprocessedItems map.
	UnprocessedItems map[string][]WriteRequest `min:"1" type:"map"`
}
+
+// String returns the string representation
+func (s BatchWriteItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s BatchWriteItemOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
// Represents the amount of provisioned throughput capacity consumed on a table
// or an index.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Capacity
type Capacity struct {
	_ struct{} `type:"structure"`

	// The total number of capacity units consumed on a table or an index.
	// Optional pointer; nil when the service does not report a value.
	CapacityUnits *float64 `type:"double"`
}
+
+// String returns the string representation
+func (s Capacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Capacity) GoString() string {
+ return s.String()
+}
+
+// Represents the selection criteria for a Query or Scan operation:
+//
+// * For a Query operation, Condition is used for specifying the KeyConditions
+// to use when querying a table or an index. For KeyConditions, only the
+// following comparison operators are supported:
+//
+// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN
+//
+// Condition is also used in a QueryFilter, which evaluates the query results
+// and returns only the desired values.
+//
+// * For a Scan operation, Condition is used in a ScanFilter, which evaluates
+// the scan results and returns only the desired values.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Condition
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // One or more values to evaluate against the supplied attribute. The number
+ // of values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ AttributeValueList []AttributeValue `type:"list"`
+
+ // A comparator for evaluating attributes. For example, equals, greater than,
+ // less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ // * EQ : Equal. EQ is supported for all data types, including lists and
+ // maps.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains
+ // an AttributeValue element of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * NE : Not equal. NE is supported for all data types, including lists
+ // and maps.
+ //
+ // * AttributeValueList can contain only one AttributeValue of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains
+ // an AttributeValue of a different type than the one provided in the request,
+ // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}.
+ // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * LE : Less than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value
+ // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]}.
+ //
+ //    * LT : Less than.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]}.
+ //
+ // ComparisonOperator is a required field
+ ComparisonOperator ComparisonOperator `type:"string" required:"true" enum:"true"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Condition) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "Condition"}
+ if len(s.ComparisonOperator) == 0 {
+ invalidParams.Add(aws.NewErrParamRequired("ComparisonOperator"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The capacity units consumed by an operation. The data returned includes the
+// total provisioned throughput consumed, along with statistics for the table
+// and any indexes involved in the operation. ConsumedCapacity is only returned
+// if the request asked for it. For more information, see Provisioned Throughput
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+// in the Amazon DynamoDB Developer Guide.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ConsumedCapacity
+type ConsumedCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of capacity units consumed by the operation.
+ CapacityUnits *float64 `type:"double"`
+
+ // The amount of throughput consumed on each global index affected by the operation.
+ GlobalSecondaryIndexes map[string]Capacity `type:"map"`
+
+ // The amount of throughput consumed on each local index affected by the operation.
+ LocalSecondaryIndexes map[string]Capacity `type:"map"`
+
+ // The amount of throughput consumed on the table affected by the operation.
+ Table *Capacity `type:"structure"`
+
+ // The name of the table that was affected by the operation.
+ TableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s ConsumedCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConsumedCapacity) GoString() string {
+ return s.String()
+}
+
+// Represents the continuous backups and point in time recovery settings on
+// the table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ContinuousBackupsDescription
+type ContinuousBackupsDescription struct {
+ _ struct{} `type:"structure"`
+
+ // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED
+ //
+ // ContinuousBackupsStatus is a required field
+ ContinuousBackupsStatus ContinuousBackupsStatus `type:"string" required:"true" enum:"true"`
+
+ // The description of the point in time recovery settings applied to the table.
+ PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuousBackupsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuousBackupsDescription) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackupInput
+type CreateBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specified name for the backup.
+ //
+ // BackupName is a required field
+ BackupName *string `min:"3" type:"string" required:"true"`
+
+ // The name of the table.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBackupInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "CreateBackupInput"}
+
+ if s.BackupName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("BackupName"))
+ }
+ if s.BackupName != nil && len(*s.BackupName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("BackupName", 3))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackupOutput
+type CreateBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Contains the details of the backup created for the table.
+ BackupDetails *BackupDetails `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBackupOutput) GoString() string {
+ return s.String()
+}
+
+ // SDKResponseMetadata returns the response metadata for the API.
+func (s CreateBackupOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents a new global secondary index to be added to an existing table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalSecondaryIndexAction
+type CreateGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be created.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The key schema for the global secondary index.
+ //
+ // KeySchema is a required field
+ KeySchema []KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into an
+ // index. These are in addition to the primary key attributes and index key
+ // attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if s.KeySchema == nil {
+ invalidParams.Add(aws.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("KeySchema", 1))
+ }
+
+ if s.Projection == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Projection"))
+ }
+
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ProvisionedThroughput"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(aws.ErrInvalidParams))
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTableInput
+type CreateGlobalTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The global table name.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+
+ // The regions where the global table needs to be created.
+ //
+ // ReplicationGroup is a required field
+ ReplicationGroup []Replica `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateGlobalTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateGlobalTableInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "CreateGlobalTableInput"}
+
+ if s.GlobalTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("GlobalTableName", 3))
+ }
+
+ if s.ReplicationGroup == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ReplicationGroup"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTableOutput
+type CreateGlobalTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Contains the details of the global table.
+ GlobalTableDescription *GlobalTableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateGlobalTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalTableOutput) GoString() string {
+ return s.String()
+}
+
+ // SDKResponseMetadata returns the response metadata for the API.
+func (s CreateGlobalTableOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents a replica to be added.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateReplicaAction
+type CreateReplicaAction struct {
+ _ struct{} `type:"structure"`
+
+ // The region of the replica to be added.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateReplicaAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReplicaAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateReplicaAction) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "CreateReplicaAction"}
+
+ if s.RegionName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RegionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the input of a CreateTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTableInput
+type CreateTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ //
+ // AttributeDefinitions is a required field
+ AttributeDefinitions []AttributeDefinition `type:"list" required:"true"`
+
+ // One or more global secondary indexes (the maximum is five) to be created
+ // on the table. Each global secondary index in the array includes the following:
+ //
+ // * IndexName - The name of the global secondary index. Must be unique only
+ // for this table.
+ //
+ // * KeySchema - Specifies the key schema for the global secondary index.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of:
+ //
+ // * ProjectionType - One of the following:
+ //
+ // KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // INCLUDE - Only the specified table attributes are projected into the index.
+ // The list of projected attributes are in NonKeyAttributes.
+ //
+ // ALL - All of the table attributes are projected into the index.
+ //
+ // NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided
+ // in NonKeyAttributes, summed across all of the secondary indexes, must
+ // not exceed 20. If you project the same attribute into two different indexes,
+ // this counts as two distinct attributes when determining the total.
+ //
+ // * ProvisionedThroughput - The provisioned throughput settings for the
+ // global secondary index, consisting of read and write capacity units.
+ GlobalSecondaryIndexes []GlobalSecondaryIndex `type:"list"`
+
+ // Specifies the attributes that make up the primary key for a table or an index.
+ // The attributes in KeySchema must also be defined in the AttributeDefinitions
+ // array. For more information, see Data Model (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each KeySchemaElement in the array is composed of:
+ //
+ // * AttributeName - The name of this key attribute.
+ //
+ // * KeyType - The role that the key attribute will assume:
+ //
+ // HASH - partition key
+ //
+ // RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // For a simple primary key (partition key), you must provide exactly one element
+ // with a KeyType of HASH.
+ //
+ // For a composite primary key (partition key and sort key), you must provide
+ // exactly two elements, in this order: The first element must have a KeyType
+ // of HASH, and the second element must have a KeyType of RANGE.
+ //
+ // For more information, see Specifying the Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // KeySchema is a required field
+ KeySchema []KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // One or more local secondary indexes (the maximum is five) to be created on
+ // the table. Each index is scoped to a given partition key value. There is
+ // a 10 GB size limit per partition key value; otherwise, the size of a local
+ // secondary index is unconstrained.
+ //
+ // Each local secondary index in the array includes the following:
+ //
+ // * IndexName - The name of the local secondary index. Must be unique only
+ // for this table.
+ //
+ // * KeySchema - Specifies the key schema for the local secondary index.
+ // The key schema must begin with the same partition key as the table.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of:
+ //
+ // * ProjectionType - One of the following:
+ //
+ // KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // INCLUDE - Only the specified table attributes are projected into the index.
+ // The list of projected attributes are in NonKeyAttributes.
+ //
+ // ALL - All of the table attributes are projected into the index.
+ //
+ // NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided
+ // in NonKeyAttributes, summed across all of the secondary indexes, must
+ // not exceed 20. If you project the same attribute into two different indexes,
+ // this counts as two distinct attributes when determining the total.
+ LocalSecondaryIndexes []LocalSecondaryIndex `type:"list"`
+
+ // Represents the provisioned throughput settings for a specified table or index.
+ // The settings can be modified using the UpdateTable operation.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+
+ // Represents the settings used to enable server-side encryption.
+ SSESpecification *SSESpecification `type:"structure"`
+
+ // The settings for DynamoDB Streams on the table. These settings consist of:
+ //
+ // * StreamEnabled - Indicates whether Streams is to be enabled (true) or
+ // disabled (false).
+ //
+ // * StreamViewType - When an item in the table is modified, StreamViewType
+ // determines what information is written to the table's stream. Valid values
+ // for StreamViewType are:
+ //
+ // KEYS_ONLY - Only the key attributes of the modified item are written to the
+ // stream.
+ //
+ // NEW_IMAGE - The entire item, as it appears after it was modified, is written
+ // to the stream.
+ //
+ // OLD_IMAGE - The entire item, as it appeared before it was modified, is written
+ // to the stream.
+ //
+ // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
+ // written to the stream.
+ StreamSpecification *StreamSpecification `type:"structure"`
+
+ // The name of the table to create.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTableInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "CreateTableInput"}
+
+ if s.AttributeDefinitions == nil {
+ invalidParams.Add(aws.NewErrParamRequired("AttributeDefinitions"))
+ }
+
+ if s.KeySchema == nil {
+ invalidParams.Add(aws.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("KeySchema", 1))
+ }
+
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ProvisionedThroughput"))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+ if s.AttributeDefinitions != nil {
+ for i, v := range s.AttributeDefinitions {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.GlobalSecondaryIndexes != nil {
+ for i, v := range s.GlobalSecondaryIndexes {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.LocalSecondaryIndexes != nil {
+ for i, v := range s.LocalSecondaryIndexes {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a CreateTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTableOutput
+type CreateTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Represents the properties of the table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableOutput) GoString() string {
+ return s.String()
+}
+
+ // SDKResponseMetadata returns the response metadata for the API.
+func (s CreateTableOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackupInput
+type DeleteBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBackupInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteBackupInput"}
+
+ if s.BackupArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(aws.NewErrParamMinLen("BackupArn", 37))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackupOutput
+type DeleteBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *BackupDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBackupOutput) GoString() string {
+ return s.String()
+}
+
+ // SDKResponseMetadata returns the response metadata for the API.
+func (s DeleteBackupOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteGlobalSecondaryIndexAction
+type DeleteGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be deleted.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the input of a DeleteItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItemInput
+type DeleteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional DeleteItem
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator ConditionalOperator `type:"string" enum:"true"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]AttributeValue `type:"map"`
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to delete.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation are returned in the response. If set to NONE
+ // (the default), no statistics are returned.
+ ReturnItemCollectionMetrics ReturnItemCollectionMetrics `type:"string" enum:"true"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were deleted. For DeleteItem, the valid values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - The content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // DeleteItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues ReturnValue `type:"string" enum:"true"`
+
+ // The name of the table from which to delete the item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DeleteItemInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteItemInput"}
+
+ if s.Key == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Key"))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DeleteItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItemOutput
+type DeleteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // responseMetadata holds the raw SDK response metadata; exposed via
+ // SDKResponseMetadata below.
+ responseMetadata aws.Response
+
+ // A map of attribute names to AttributeValue objects, representing the item
+ // as it appeared before the DeleteItem operation. This map appears in the response
+ // only if ReturnValues was specified as ALL_OLD in the request.
+ Attributes map[string]AttributeValue `type:"map"`
+
+ // The capacity units consumed by the DeleteItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics
+ // for the table and any indexes involved in the operation. ConsumedCapacity
+ // is only returned if the ReturnConsumedCapacity parameter was specified. For
+ // more information, see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the DeleteItem
+ // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
+ // parameter was specified. If the table does not have any local secondary indexes,
+ // this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item itself.
+ //
+ // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on that table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteItemOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DeleteItemOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents a replica to be removed.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteReplicaAction
+type DeleteReplicaAction struct {
+ _ struct{} `type:"structure"`
+
+ // The region of the replica to be removed.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteReplicaAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReplicaAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint, or nil.
+func (s *DeleteReplicaAction) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteReplicaAction"}
+
+ if s.RegionName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RegionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents a request to perform a DeleteItem operation on an item.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteRequest
+type DeleteRequest struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute name to attribute values, representing the primary key
+ // of the item to delete. All of the table's primary key attributes must be
+ // specified, and their data types must match those of the table's key schema.
+ //
+ // Key is a required field
+ Key map[string]AttributeValue `type:"map" required:"true"`
+}
+
+// String returns the string representation, built via awsutil.Prettify.
+func (s DeleteRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRequest) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a DeleteTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTableInput
+type DeleteTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to delete.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DeleteTableInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteTableInput"}
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DeleteTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTableOutput
+type DeleteTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTableOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DeleteTableOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeBackup operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackupInput
+type DescribeBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeBackupInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeBackupInput"}
+
+ if s.BackupArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(aws.NewErrParamMinLen("BackupArn", 37))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeBackup operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackupOutput
+type DescribeBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *BackupDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeBackupOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeContinuousBackups operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackupsInput
+type DescribeContinuousBackupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the table for which the customer wants to check the continuous backups
+ // and point in time recovery settings.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeContinuousBackupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeContinuousBackupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeContinuousBackupsInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"}
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeContinuousBackups operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackupsOutput
+type DescribeContinuousBackupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Represents the continuous backups and point in time recovery settings on
+ // the table.
+ ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeContinuousBackupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeContinuousBackupsOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeContinuousBackupsOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeEndpoints operation. Has no content.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpointsRequest
+type DescribeEndpointsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeEndpoints operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpointsResponse
+type DescribeEndpointsOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The endpoints returned by the service.
+ //
+ // Endpoints is a required field
+ Endpoints []Endpoint `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeEndpointsOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeGlobalTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableInput
+type DescribeGlobalTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeGlobalTableInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeGlobalTableInput"}
+
+ if s.GlobalTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("GlobalTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeGlobalTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableOutput
+type DescribeGlobalTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Contains the details of the global table.
+ GlobalTableDescription *GlobalTableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeGlobalTableOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeGlobalTableSettings operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettingsInput
+type DescribeGlobalTableSettingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table to describe.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableSettingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableSettingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeGlobalTableSettingsInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"}
+
+ if s.GlobalTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("GlobalTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeGlobalTableSettings operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettingsOutput
+type DescribeGlobalTableSettingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The name of the global table.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The region specific settings for the global table.
+ ReplicaSettings []ReplicaSettingsDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableSettingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableSettingsOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeGlobalTableSettingsOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeLimits operation. Has no content.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimitsInput
+type DescribeLimitsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation, built via awsutil.Prettify.
+func (s DescribeLimitsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLimitsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeLimits operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimitsOutput
+type DescribeLimitsOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The maximum total read capacity units that your account allows you to provision
+ // across all of your tables in this region.
+ AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum total write capacity units that your account allows you to provision
+ // across all of your tables in this region.
+ AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum read capacity units that your account allows you to provision
+ // for a new table that you are creating in this region, including the read
+ // capacity units provisioned for its global secondary indexes (GSIs).
+ TableMaxReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum write capacity units that your account allows you to provision
+ // for a new table that you are creating in this region, including the write
+ // capacity units provisioned for its global secondary indexes (GSIs).
+ TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s DescribeLimitsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLimitsOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeLimitsOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableInput
+type DescribeTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to describe.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeTableInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeTableInput"}
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeTable operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableOutput
+type DescribeTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The properties of the table.
+ Table *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTableOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeTableOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a DescribeTimeToLive operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLiveInput
+type DescribeTimeToLiveInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to be described.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// It returns an aws.ErrInvalidParams error describing every violated
+// constraint (missing required field, minimum-length breach), or nil.
+func (s *DescribeTimeToLiveInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DescribeTimeToLiveInput"}
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DescribeTimeToLive operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLiveOutput
+type DescribeTimeToLiveOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The description of the Time to Live (TTL) status on the specified table.
+ TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DescribeTimeToLiveOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents an endpoint entry as returned in DescribeEndpointsOutput.Endpoints.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Endpoint
+type Endpoint struct {
+ _ struct{} `type:"structure"`
+
+ // The address of the endpoint.
+ //
+ // Address is a required field
+ Address *string `type:"string" required:"true"`
+
+ // NOTE(review): presumably the number of minutes the endpoint may be cached
+ // by clients — confirm against the DynamoDB API Reference.
+ //
+ // CachePeriodInMinutes is a required field
+ CachePeriodInMinutes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s Endpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Endpoint) GoString() string {
+ return s.String()
+}
+
+// Represents a condition to be compared with an attribute value. This condition
+// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison
+// evaluates to true, the operation succeeds; if not, the operation fails. You
+// can use ExpectedAttributeValue in one of two different ways:
+//
+// * Use AttributeValueList to specify one or more values to compare against
+// an attribute. Use ComparisonOperator to specify how you want to perform
+// the comparison. If the comparison evaluates to true, then the conditional
+// operation succeeds.
+//
+// * Use Value to specify a value that DynamoDB will compare against an attribute.
+// If the values match, then ExpectedAttributeValue evaluates to true and
+// the conditional operation succeeds. Optionally, you can also set Exists
+// to false, indicating that you do not expect to find the attribute value
+// in the table. In this case, the conditional operation succeeds only if
+// the comparison evaluates to false.
+//
+// Value and Exists are incompatible with AttributeValueList and ComparisonOperator.
+// Note that if you use both sets of parameters at once, DynamoDB will return
+// a ValidationException exception.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExpectedAttributeValue
+type ExpectedAttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // One or more values to evaluate against the supplied attribute. The number
+ // of values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ //
+ // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributeValueList []AttributeValue `type:"list"`
+
+ // A comparator for evaluating attributes in the AttributeValueList. For example,
+ // equals, greater than, less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ // * EQ : Equal. EQ is supported for all data types, including lists and
+ // maps.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains
+ // an AttributeValue element of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * NE : Not equal. NE is supported for all data types, including lists
+ // and maps.
+ //
+ // * AttributeValueList can contain only one AttributeValue of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains
+ // an AttributeValue of a different type than the one provided in the request,
+ // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}.
+ // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * LE : Less than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value
+ // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]}.
+ //
+ // * LT : Less than.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value
+ // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]}.
+ //
+ // NOTE(review): the generated documentation for the remaining operators in
+ // the list above (GE, GT, NOT_NULL, NULL, CONTAINS, NOT_CONTAINS, BEGINS_WITH,
+ // IN, BETWEEN) was truncated upstream; see the Amazon DynamoDB API Reference.
+ ComparisonOperator ComparisonOperator `type:"string" enum:"true"`
+
+ // Causes DynamoDB to evaluate the value before attempting a conditional operation:
+ //
+ // * If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the operation succeeds.
+ // If it is not found, the operation fails with a ConditionalCheckFailedException.
+ //
+ // * If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the operation succeeds. If the value is found, despite the
+ // assumption that it does not exist, the operation fails with a ConditionalCheckFailedException.
+ //
+ // The default setting for Exists is true. If you supply a Value all by itself,
+ // DynamoDB assumes the attribute exists: You don't have to set Exists to true,
+ // because it is implied.
+ //
+ // DynamoDB returns a ValidationException if:
+ //
+ // * Exists is true but there is no Value to check. (You expect a value to
+ // exist, but don't specify what that value is.)
+ //
+ // * Exists is false but you also provide a Value. (You cannot expect an
+ // attribute to have a value, while also expecting it not to exist.)
+ Exists *bool `type:"boolean"`
+
+ // Represents the data for the expected attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see Data Types (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
+ // in the Amazon DynamoDB Developer Guide.
+ Value *AttributeValue `type:"structure"`
+}
+
+// String returns the string representation
+func (s ExpectedAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExpectedAttributeValue) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a GetItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItemInput
+type GetItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see AttributesToGet (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []string `min:"1" type:"list"`
+
+ // Determines the read consistency model: If set to true, then the operation
+ // uses strongly consistent reads; otherwise, the operation uses eventually
+ // consistent reads.
+ ConsistentRead *bool `type:"boolean"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to retrieve.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]AttributeValue `type:"map" required:"true"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
+
+ // The name of the table containing the requested item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetItemInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GetItemInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributesToGet", 1))
+ }
+
+ if s.Key == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Key"))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a GetItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItemOutput
+type GetItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The capacity units consumed by the GetItem operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression.
+ Item map[string]AttributeValue `type:"map"`
+}
+
+// String returns the string representation
+func (s GetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetItemOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s GetItemOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the properties of a global secondary index.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalSecondaryIndex
+type GlobalSecondaryIndex struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeySchema is a required field
+ KeySchema []KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndex) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndex) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalSecondaryIndex) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GlobalSecondaryIndex"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if s.KeySchema == nil {
+ invalidParams.Add(aws.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("KeySchema", 1))
+ }
+
+ if s.Projection == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Projection"))
+ }
+
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ProvisionedThroughput"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(aws.ErrInvalidParams))
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the properties of a global secondary index.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalSecondaryIndexDescription
+type GlobalSecondaryIndexDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the index is currently backfilling. Backfilling is the
+ // process of reading items from the table and determining whether they can
+ // be added to the index. (Not all items will qualify: For example, a partition
+ // key cannot have any duplicate values.) If an item can be added to the index,
+ // DynamoDB will do so. After all items have been processed, the backfilling
+ // operation is complete and Backfilling is false.
+ //
+ // For indexes that were created during a CreateTable operation, the Backfilling
+ // attribute does not appear in the DescribeTable output.
+ Backfilling *bool `type:"boolean"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string `type:"string"`
+
+ // The name of the global secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64 `type:"long"`
+
+ // The current state of the global secondary index:
+ //
+ // * CREATING - The index is being created.
+ //
+ // * UPDATING - The index is being updated.
+ //
+ // * DELETING - The index is being deleted.
+ //
+ // * ACTIVE - The index is ready for use.
+ IndexStatus IndexStatus `type:"string" enum:"true"`
+
+ // The number of items in the specified index. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the properties of a global secondary index for the table when
+// the backup was created.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalSecondaryIndexInfo
+type GlobalSecondaryIndexInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexInfo) GoString() string {
+ return s.String()
+}
+
+// Represents one of the following:
+//
+// * A new global secondary index to be added to an existing table.
+//
+// * New provisioned throughput parameters for an existing global secondary
+// index.
+//
+// * An existing global secondary index to be removed from an existing table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalSecondaryIndexUpdate
+type GlobalSecondaryIndexUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The parameters required for creating a global secondary index on an existing
+ // table:
+ //
+ // * IndexName
+ //
+ // * KeySchema
+ //
+ // * AttributeDefinitions
+ //
+ // * Projection
+ //
+ // * ProvisionedThroughput
+ Create *CreateGlobalSecondaryIndexAction `type:"structure"`
+
+ // The name of an existing global secondary index to be removed.
+ Delete *DeleteGlobalSecondaryIndexAction `type:"structure"`
+
+ // The name of an existing global secondary index, along with new provisioned
+ // throughput settings to be applied to that index.
+ Update *UpdateGlobalSecondaryIndexAction `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalSecondaryIndexUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"}
+ if s.Create != nil {
+ if err := s.Create.Validate(); err != nil {
+ invalidParams.AddNested("Create", err.(aws.ErrInvalidParams))
+ }
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(aws.ErrInvalidParams))
+ }
+ }
+ if s.Update != nil {
+ if err := s.Update.Validate(); err != nil {
+ invalidParams.AddNested("Update", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the properties of a global table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalTable
+type GlobalTable struct {
+ _ struct{} `type:"structure"`
+
+ // The global table name.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The regions where the global table has replicas.
+ ReplicationGroup []Replica `type:"list"`
+}
+
+// String returns the string representation
+func (s GlobalTable) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTable) GoString() string {
+ return s.String()
+}
+
+// Contains details about the global table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalTableDescription
+type GlobalTableDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The creation time of the global table.
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The unique identifier of the global table.
+ GlobalTableArn *string `type:"string"`
+
+ // The global table name.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The current state of the global table:
+ //
+ // * CREATING - The global table is being created.
+ //
+ // * UPDATING - The global table is being updated.
+ //
+ // * DELETING - The global table is being deleted.
+ //
+ // * ACTIVE - The global table is ready for use.
+ GlobalTableStatus GlobalTableStatus `type:"string" enum:"true"`
+
+ // The regions where the global table has replicas.
+ ReplicationGroup []ReplicaDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s GlobalTableDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTableDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the settings of a global secondary index for a global table that
+// will be modified.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GlobalTableGlobalSecondaryIndexSettingsUpdate
+type GlobalTableGlobalSecondaryIndexSettingsUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // AutoScaling settings for managing a global secondary index's write capacity
+ // units.
+ ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1))
+ }
+ if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Information about item collections, if any, that were affected by the operation.
+// ItemCollectionMetrics is only returned if the request asked for it. If the
+// table does not have any local secondary indexes, this information is not
+// returned in the response.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ItemCollectionMetrics
+type ItemCollectionMetrics struct {
+ _ struct{} `type:"structure"`
+
+ // The partition key value of the item collection. This value is the same as
+ // the partition key value of the item.
+ ItemCollectionKey map[string]AttributeValue `type:"map"`
+
+ // An estimate of item collection size, in gigabytes. This value is a two-element
+ // array containing a lower bound and an upper bound for the estimate. The estimate
+ // includes the size of all the items in the table, plus the size of all attributes
+ // projected into all of the local secondary indexes on that table. Use this
+ // estimate to measure whether a local secondary index is approaching its size
+ // limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ SizeEstimateRangeGB []float64 `type:"list"`
+}
+
+// String returns the string representation
+func (s ItemCollectionMetrics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ItemCollectionMetrics) GoString() string {
+ return s.String()
+}
+
+// Represents a single element of a key schema. A key schema specifies the attributes
+// that make up the primary key of a table, or the key attributes of an index.
+//
+// A KeySchemaElement represents exactly one attribute of the primary key. For
+// example, a simple primary key would be represented by one KeySchemaElement
+// (for the partition key). A composite primary key would require one KeySchemaElement
+// for the partition key, and another KeySchemaElement for the sort key.
+//
+// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).
+// The data type must be one of String, Number, or Binary. The attribute cannot
+// be nested within a List or a Map.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/KeySchemaElement
+type KeySchemaElement struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a key attribute.
+ //
+ // AttributeName is a required field
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The role that this key attribute will assume:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeyType is a required field
+ KeyType KeyType `type:"string" required:"true" enum:"true"`
+}
+
+// String returns the string representation
+func (s KeySchemaElement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeySchemaElement) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *KeySchemaElement) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "KeySchemaElement"}
+
+ if s.AttributeName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributeName", 1))
+ }
+ if len(s.KeyType) == 0 {
+ invalidParams.Add(aws.NewErrParamRequired("KeyType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents a set of primary keys and, for each key, the attributes to retrieve
+// from the table.
+//
+// For each primary key, you must provide all of the key attributes. For example,
+// with a simple primary key, you only need to provide the partition key. For
+// a composite primary key, you must provide both the partition key and the
+// sort key.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/KeysAndAttributes
+type KeysAndAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []string `min:"1" type:"list"`
+
+ // The consistency of a read operation. If set to true, then a strongly consistent
+ // read is used; otherwise, an eventually consistent read is used.
+ ConsistentRead *bool `type:"boolean"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // The primary key attribute values that define the items and the attributes
+ // associated with the items.
+ //
+ // Keys is a required field
+ Keys []map[string]AttributeValue `min:"1" type:"list" required:"true"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the ProjectionExpression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+}
+
+// String returns the string representation
+func (s KeysAndAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeysAndAttributes) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *KeysAndAttributes) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "KeysAndAttributes"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributesToGet", 1))
+ }
+
+ if s.Keys == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Keys"))
+ }
+ if s.Keys != nil && len(s.Keys) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Keys", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackupsInput
+type ListBackupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The backups from the table specified by BackupType are listed.
+ //
+ // Where BackupType can be:
+ //
+ // * USER - On-demand backup created by you.
+ //
+ // * SYSTEM - On-demand backup automatically created by DynamoDB.
+ //
+ // * ALL - All types of on-demand backups (USER and SYSTEM).
+ BackupType BackupTypeFilter `type:"string" enum:"true"`
+
+ // LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current
+ // page of results was returned, inclusive of the current page of results. This
+ // value may be specified as the ExclusiveStartBackupArn of a new ListBackups
+ // operation in order to fetch the next page of results.
+ ExclusiveStartBackupArn *string `min:"37" type:"string"`
+
+ // Maximum number of backups to return at once.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // The backups from the table specified by TableName are listed.
+ TableName *string `min:"3" type:"string"`
+
+ // Only backups created after this time are listed. TimeRangeLowerBound is inclusive.
+ TimeRangeLowerBound *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Only backups created before this time are listed. TimeRangeUpperBound is
+ // exclusive.
+ TimeRangeUpperBound *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s ListBackupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBackupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBackupsInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListBackupsInput"}
+ if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 {
+ invalidParams.Add(aws.NewErrParamMinLen("ExclusiveStartBackupArn", 37))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("Limit", 1))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackupsOutput
+type ListBackupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // List of BackupSummary objects.
+ BackupSummaries []BackupSummary `type:"list"`
+
+ // The ARN of the backup last evaluated when the current page of results was
+ // returned, inclusive of the current page of results. This value may be specified
+ // as the ExclusiveStartBackupArn of a new ListBackups operation in order to
+ // fetch the next page of results.
+ //
+ // If LastEvaluatedBackupArn is empty, then the last page of results has been
+ // processed and there are no more results to be retrieved.
+ //
+ // If LastEvaluatedBackupArn is not empty, this may or may not indicate there
+ // is more data to be returned. All results are guaranteed to have been returned
+ // if and only if no value for LastEvaluatedBackupArn is returned.
+ LastEvaluatedBackupArn *string `min:"37" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBackupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBackupsOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s ListBackupsOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTablesInput
+type ListGlobalTablesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The first global table name that this operation will evaluate.
+ ExclusiveStartGlobalTableName *string `min:"3" type:"string"`
+
+ // The maximum number of table names to return.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Lists the global tables in a specific region.
+ RegionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListGlobalTablesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGlobalTablesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListGlobalTablesInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListGlobalTablesInput"}
+ if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("Limit", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTablesOutput
+type ListGlobalTablesOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // List of global table names.
+ GlobalTables []GlobalTable `type:"list"`
+
+ // Last evaluated global table name.
+ LastEvaluatedGlobalTableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s ListGlobalTablesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGlobalTablesOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s ListGlobalTablesOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the input of a ListTables operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTablesInput
+type ListTablesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The first table name that this operation will evaluate. Use the value that
+ // was returned for LastEvaluatedTableName in a previous operation, so that
+ // you can obtain the next page of results.
+ ExclusiveStartTableName *string `min:"3" type:"string"`
+
+ // A maximum number of table names to return. If this parameter is not specified,
+ // the limit is 100.
+ Limit *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListTablesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTablesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTablesInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListTablesInput"}
+ if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("ExclusiveStartTableName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("Limit", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a ListTables operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTablesOutput
+type ListTablesOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The name of the last table in the current page of results. Use this value
+ // as the ExclusiveStartTableName in a new request to obtain the next page of
+ // results, until all the table names are returned.
+ //
+ // If you do not receive a LastEvaluatedTableName value in the response, this
+ // means that there are no more table names to be retrieved.
+ LastEvaluatedTableName *string `min:"3" type:"string"`
+
+ // The names of the tables associated with the current account at the current
+ // endpoint. The maximum size of this array is 100.
+ //
+ // If LastEvaluatedTableName also appears in the output, you can use this value
+ // as the ExclusiveStartTableName parameter in a subsequent ListTables request
+ // and obtain the next page of results.
+ TableNames []string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTablesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTablesOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s ListTablesOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResourceInput
+type ListTagsOfResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional string that, if supplied, must be copied from the output of a
+ // previous call to ListTagOfResource. When provided in this manner, this API
+ // fetches the next page of results.
+ NextToken *string `type:"string"`
+
+ // The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+ // Resource Name (ARN).
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsOfResourceInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListTagsOfResourceInput"}
+
+ if s.ResourceArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("ResourceArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResourceOutput
+type ListTagsOfResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // If this value is returned, there are additional results to be displayed.
+ // To retrieve them, call ListTagsOfResource again, with NextToken set to this
+ // value.
+ NextToken *string `type:"string"`
+
+ // The tags currently associated with the Amazon DynamoDB resource.
+ Tags []Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetdata return sthe response metadata for the API.
+func (s ListTagsOfResourceOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Represents the properties of a local secondary index.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/LocalSecondaryIndex
+type LocalSecondaryIndex struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the local secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The complete key schema for the local secondary index, consisting of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeySchema is a required field
+ KeySchema []KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // local secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndex) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndex) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LocalSecondaryIndex) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "LocalSecondaryIndex"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if s.KeySchema == nil {
+ invalidParams.Add(aws.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("KeySchema", 1))
+ }
+
+ if s.Projection == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Projection"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the properties of a local secondary index.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/LocalSecondaryIndexDescription
+type LocalSecondaryIndexDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string `type:"string"`
+
+ // Represents the name of the local secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64 `type:"long"`
+
+ // The number of items in the specified index. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The complete key schema for the local secondary index, consisting of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/LocalSecondaryIndexInfo
+type LocalSecondaryIndexInfo struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the name of the local secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The complete key schema for a local secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexInfo) GoString() string {
+ return s.String()
+}
+
+// The description of the point in time settings applied to the table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PointInTimeRecoveryDescription
+type PointInTimeRecoveryDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the earliest point in time you can restore your table to. It You
+ // can restore your table to any point in time during the last 35 days.
+ EarliestRestorableDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // LatestRestorableDateTime is typically 5 minutes before the current time.
+ LatestRestorableDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The current state of point in time recovery:
+ //
+ // * ENABLING - Point in time recovery is being enabled.
+ //
+ // * ENABLED - Point in time recovery is enabled.
+ //
+ // * DISABLED - Point in time recovery is disabled.
+ PointInTimeRecoveryStatus PointInTimeRecoveryStatus `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoveryDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoveryDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the settings used to enable point in time recovery.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PointInTimeRecoverySpecification
+type PointInTimeRecoverySpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether point in time recovery is enabled (true) or disabled (false)
+ // on the table.
+ //
+ // PointInTimeRecoveryEnabled is a required field
+ PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoverySpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoverySpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PointInTimeRecoverySpecification) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
+
+ if s.PointInTimeRecoveryEnabled == nil {
+ invalidParams.Add(aws.NewErrParamRequired("PointInTimeRecoveryEnabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents attributes that are copied (projected) from the table into an
+// index. These are in addition to the primary key attributes and index key
+// attributes, which are automatically projected.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Projection
+type Projection struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the non-key attribute names which will be projected into the index.
+ //
+ // For local secondary indexes, the total count of NonKeyAttributes summed across
+ // all of the local secondary indexes, must not exceed 20. If you project the
+ // same attribute into two different indexes, this counts as two distinct attributes
+ // when determining the total.
+ NonKeyAttributes []string `min:"1" type:"list"`
+
+ // The set of attributes that are projected into the index:
+ //
+ // * KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // * INCLUDE - Only the specified table attributes are projected into the
+ // index. The list of projected attributes are in NonKeyAttributes.
+ //
+ // * ALL - All of the table attributes are projected into the index.
+ ProjectionType ProjectionType `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s Projection) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Projection) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Projection) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "Projection"}
+ if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("NonKeyAttributes", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the provisioned throughput settings for a specified table or index.
+// The settings can be modified using the UpdateTable operation.
+//
+// For current minimum and maximum provisioned throughput values, see Limits
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+// in the Amazon DynamoDB Developer Guide.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ProvisionedThroughput
+type ProvisionedThroughput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. For more information, see Specifying
+ // Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ReadCapacityUnits is a required field
+ ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException. For more information, see Specifying Read and Write
+ // Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // WriteCapacityUnits is a required field
+ WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s ProvisionedThroughput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisionedThroughput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ProvisionedThroughput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ProvisionedThroughput"}
+
+ if s.ReadCapacityUnits == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ReadCapacityUnits"))
+ }
+ if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("ReadCapacityUnits", 1))
+ }
+
+ if s.WriteCapacityUnits == nil {
+ invalidParams.Add(aws.NewErrParamRequired("WriteCapacityUnits"))
+ }
+ if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("WriteCapacityUnits", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the provisioned throughput settings for the table, consisting
+// of read and write capacity units, along with data about increases and decreases.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ProvisionedThroughputDescription
+type ProvisionedThroughputDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time of the last provisioned throughput decrease for this table.
+ LastDecreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The date and time of the last provisioned throughput increase for this table.
+ LastIncreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The number of provisioned throughput decreases for this table during this
+ // UTC calendar day. For current maximums on provisioned throughput decreases,
+ // see Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ NumberOfDecreasesToday *int64 `min:"1" type:"long"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. Eventually consistent reads require
+ // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits
+ // per second provides 100 eventually consistent ReadCapacityUnits per second.
+ ReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ WriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s ProvisionedThroughputDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisionedThroughputDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a PutItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItemInput
+type PutItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional PutItem operation
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator ConditionalOperator `type:"string" enum:"true"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]AttributeValue `type:"map"`
+
+ // A map of attribute name/value pairs, one for each attribute. Only the primary
+ // key attributes are required; you can optionally provide other attribute name-value
+ // pairs for the item.
+ //
+ // You must provide all of the attributes for the primary key. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide both values for both the
+ // partition key and the sort key.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each element in the Item map is an AttributeValue object.
+ //
+ // Item is a required field
+ Item map[string]AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation are returned in the response. If set to NONE
+ // (the default), no statistics are returned.
+ ReturnItemCollectionMetrics ReturnItemCollectionMetrics `type:"string" enum:"true"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were updated with the PutItem request. For PutItem, the valid
+ // values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+ // content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // PutItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues ReturnValue `type:"string" enum:"true"`
+
+ // The name of the table to contain the item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutItemInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "PutItemInput"}
+
+ if s.Item == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Item"))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// PutItemOutput represents the output of a PutItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItemOutput
type PutItemOutput struct {
	_ struct{} `type:"structure"`

	// Response metadata recorded by the SDK; exposed via SDKResponseMetadata.
	responseMetadata aws.Response

	// The attribute values as they appeared before the PutItem operation, but only
	// if ReturnValues is specified as ALL_OLD in the request. Each element consists
	// of an attribute name and an attribute value.
	Attributes map[string]AttributeValue `type:"map"`

	// The capacity units consumed by the PutItem operation. The data returned includes
	// the total provisioned throughput consumed, along with statistics for the
	// table and any indexes involved in the operation. ConsumedCapacity is only
	// returned if the ReturnConsumedCapacity parameter was specified. For more
	// information, see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
	// in the Amazon DynamoDB Developer Guide.
	ConsumedCapacity *ConsumedCapacity `type:"structure"`

	// Information about item collections, if any, that were affected by the PutItem
	// operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
	// parameter was specified. If the table does not have any local secondary indexes,
	// this information is not returned in the response.
	//
	// Each ItemCollectionMetrics element consists of:
	//
	// * ItemCollectionKey - The partition key value of the item collection.
	// This is the same as the partition key value of the item itself.
	//
	// * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
	// This value is a two-element array containing a lower bound and an upper
	// bound for the estimate. The estimate includes the size of all the items
	// in the table, plus the size of all attributes projected into all of the
	// local secondary indexes on that table. Use this estimate to measure whether
	// a local secondary index is approaching its size limit.
	//
	// The estimate is subject to change over time; therefore, do not rely on the
	// precision or accuracy of the estimate.
	ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
}
+
+// String returns the string representation
+func (s PutItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemOutput) GoString() string {
+ return s.String()
+}
+
// SDKResponseMetadata returns the response metadata for the API call.
func (s PutItemOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// PutRequest represents a request to perform a PutItem operation on an item.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutRequest
type PutRequest struct {
	_ struct{} `type:"structure"`

	// A map of attribute name to attribute values, representing the primary key
	// of an item to be processed by PutItem. All of the table's primary key attributes
	// must be specified, and their data types must match those of the table's key
	// schema. If any attributes are present in the item which are part of an index
	// key schema for the table, their types must match the index key schema.
	//
	// Item is a required field
	Item map[string]AttributeValue `type:"map" required:"true"`
}
+
+// String returns the string representation
+func (s PutRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRequest) GoString() string {
+ return s.String()
+}
+
// QueryInput represents the input of a Query operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/QueryInput
type QueryInput struct {
	_ struct{} `type:"structure"`

	// This is a legacy parameter. Use ProjectionExpression instead. For more information,
	// see AttributesToGet (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
	// in the Amazon DynamoDB Developer Guide.
	AttributesToGet []string `min:"1" type:"list"`

	// This is a legacy parameter. Use FilterExpression instead. For more information,
	// see ConditionalOperator (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
	// in the Amazon DynamoDB Developer Guide.
	ConditionalOperator ConditionalOperator `type:"string" enum:"true"`

	// Determines the read consistency model: If set to true, then the operation
	// uses strongly consistent reads; otherwise, the operation uses eventually
	// consistent reads.
	//
	// Strongly consistent reads are not supported on global secondary indexes.
	// If you query a global secondary index with ConsistentRead set to true, you
	// will receive a ValidationException.
	ConsistentRead *bool `type:"boolean"`

	// The primary key of the first item that this operation will evaluate. Use
	// the value that was returned for LastEvaluatedKey in the previous operation.
	//
	// The data type for ExclusiveStartKey must be String, Number or Binary. No
	// set data types are allowed.
	ExclusiveStartKey map[string]AttributeValue `type:"map"`

	// One or more substitution tokens for attribute names in an expression. The
	// following are some use cases for using ExpressionAttributeNames:
	//
	// * To access an attribute whose name conflicts with a DynamoDB reserved
	// word.
	//
	// * To create a placeholder for repeating occurrences of an attribute name
	// in an expression.
	//
	// * To prevent special characters in an attribute name from being misinterpreted
	// in an expression.
	//
	// Use the # character in an expression to dereference an attribute name. For
	// example, consider the following attribute name:
	//
	// * Percentile
	//
	// The name of this attribute conflicts with a reserved word, so it cannot be
	// used directly in an expression. (For the complete list of reserved words,
	// see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
	// in the Amazon DynamoDB Developer Guide). To work around this, you could specify
	// the following for ExpressionAttributeNames:
	//
	// * {"#P":"Percentile"}
	//
	// You could then use this substitution in an expression, as in this example:
	//
	// * #P = :val
	//
	// Tokens that begin with the : character are expression attribute values, which
	// are placeholders for the actual value at runtime.
	//
	// For more information on expression attribute names, see Accessing Item Attributes
	// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
	// in the Amazon DynamoDB Developer Guide.
	ExpressionAttributeNames map[string]string `type:"map"`

	// One or more values that can be substituted in an expression.
	//
	// Use the : (colon) character in an expression to dereference an attribute
	// value. For example, suppose that you wanted to check whether the value of
	// the ProductStatus attribute was one of the following:
	//
	// Available | Backordered | Discontinued
	//
	// You would first need to specify ExpressionAttributeValues as follows:
	//
	// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
	// }
	//
	// You could then use these values in an expression, such as this:
	//
	// ProductStatus IN (:avail, :back, :disc)
	//
	// For more information on expression attribute values, see Specifying Conditions
	// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
	// in the Amazon DynamoDB Developer Guide.
	ExpressionAttributeValues map[string]AttributeValue `type:"map"`

	// A string that contains conditions that DynamoDB applies after the Query operation,
	// but before the data is returned to you. Items that do not satisfy the FilterExpression
	// criteria are not returned.
	//
	// A FilterExpression does not allow key attributes. You cannot define a filter
	// expression based on a partition key or a sort key.
	//
	// A FilterExpression is applied after the items have already been read; the
	// process of filtering does not consume any additional read capacity units.
	//
	// For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults)
	// in the Amazon DynamoDB Developer Guide.
	FilterExpression *string `type:"string"`

	// The name of an index to query. This index can be any local secondary index
	// or global secondary index on the table. Note that if you use the IndexName
	// parameter, you must also provide TableName.
	IndexName *string `min:"3" type:"string"`

	// The condition that specifies the key value(s) for items to be retrieved by
	// the Query action.
	//
	// The condition must perform an equality test on a single partition key value.
	//
	// The condition can optionally perform one of several comparison tests on a
	// single sort key value. This allows Query to retrieve one item with a given
	// partition key value and sort key value, or several items that have the same
	// partition key value but different sort key values.
	//
	// The partition key equality test is required, and must be specified in the
	// following format:
	//
	// partitionKeyName = :partitionkeyval
	//
	// If you also want to provide a condition for the sort key, it must be combined
	// using AND with the condition for the sort key. Following is an example, using
	// the = comparison operator for the sort key:
	//
	// partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
	//
	// Valid comparisons for the sort key condition are as follows:
	//
	// * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
	//
	// * sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
	//
	// * sortKeyName <= :sortkeyval - true if the sort key value is less than or
	// equal to :sortkeyval.
	//
	// * sortKeyName > :sortkeyval - true if the sort key value is greater than
	// :sortkeyval.
	//
	// * sortKeyName >= :sortkeyval - true if the sort key value is greater than
	// or equal to :sortkeyval.
	//
	// * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key
	// value is greater than or equal to :sortkeyval1, and less than or equal
	// to :sortkeyval2.
	//
	// * begins_with (sortKeyName, :sortkeyval) - true if the sort key value
	// begins with a particular operand. (You cannot use this function with a
	// sort key that is of type Number.) Note that the function name begins_with
	// is case-sensitive.
	//
	// Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval
	// and :sortkeyval with actual values at runtime.
	//
	// You can optionally use the ExpressionAttributeNames parameter to replace
	// the names of the partition key and sort key with placeholder tokens. This
	// option might be necessary if an attribute name conflicts with a DynamoDB
	// reserved word. For example, the following KeyConditionExpression parameter
	// causes an error because Size is a reserved word:
	//
	// * Size = :myval
	//
	// To work around this, define a placeholder (such as #S) to represent the attribute
	// name Size. KeyConditionExpression then is as follows:
	//
	// * #S = :myval
	//
	// For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
	// in the Amazon DynamoDB Developer Guide.
	//
	// For more information on ExpressionAttributeNames and ExpressionAttributeValues,
	// see Using Placeholders for Attribute Names and Values (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
	// in the Amazon DynamoDB Developer Guide.
	KeyConditionExpression *string `type:"string"`

	// This is a legacy parameter. Use KeyConditionExpression instead. For more
	// information, see KeyConditions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
	// in the Amazon DynamoDB Developer Guide.
	KeyConditions map[string]Condition `type:"map"`

	// The maximum number of items to evaluate (not necessarily the number of matching
	// items). If DynamoDB processes the number of items up to the limit while processing
	// the results, it stops the operation and returns the matching values up to
	// that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
	// so that you can pick up where you left off. Also, if the processed data set
	// size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
	// and returns the matching values up to the limit, and a key in LastEvaluatedKey
	// to apply in a subsequent operation to continue the operation. For more information,
	// see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
	// in the Amazon DynamoDB Developer Guide.
	Limit *int64 `min:"1" type:"integer"`

	// A string that identifies one or more attributes to retrieve from the table.
	// These attributes can include scalars, sets, or elements of a JSON document.
	// The attributes in the expression must be separated by commas.
	//
	// If no attribute names are specified, then all attributes will be returned.
	// If any of the requested attributes are not found, they will not appear in
	// the result.
	//
	// For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
	// in the Amazon DynamoDB Developer Guide.
	ProjectionExpression *string `type:"string"`

	// This is a legacy parameter. Use FilterExpression instead. For more information,
	// see QueryFilter (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html)
	// in the Amazon DynamoDB Developer Guide.
	QueryFilter map[string]Condition `type:"map"`

	// Determines the level of detail about provisioned throughput consumption that
	// is returned in the response:
	//
	// * INDEXES - The response includes the aggregate ConsumedCapacity for the
	// operation, together with ConsumedCapacity for each table and secondary
	// index that was accessed.
	//
	// Note that some operations, such as GetItem and BatchGetItem, do not access
	// any indexes at all. In these cases, specifying INDEXES will only return
	// ConsumedCapacity information for table(s).
	//
	// * TOTAL - The response includes only the aggregate ConsumedCapacity for
	// the operation.
	//
	// * NONE - No ConsumedCapacity details are included in the response.
	ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`

	// Specifies the order for index traversal: If true (default), the traversal
	// is performed in ascending order; if false, the traversal is performed in
	// descending order.
	//
	// Items with the same partition key value are stored in sorted order by sort
	// key. If the sort key data type is Number, the results are stored in numeric
	// order. For type String, the results are stored in order of UTF-8 bytes. For
	// type Binary, DynamoDB treats each byte of the binary data as unsigned.
	//
	// If ScanIndexForward is true, DynamoDB returns the results in the order in
	// which they are stored (by sort key value). This is the default behavior.
	// If ScanIndexForward is false, DynamoDB reads the results in reverse order
	// by sort key value, and then returns the results to the client.
	ScanIndexForward *bool `type:"boolean"`

	// The attributes to be returned in the result. You can retrieve all item attributes,
	// specific item attributes, the count of matching items, or in the case of
	// an index, some or all of the attributes projected into the index.
	//
	// * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
	// table or index. If you query a local secondary index, then for each matching
	// item in the index DynamoDB will fetch the entire item from the parent
	// table. If the index is configured to project all item attributes, then
	// all of the data can be obtained from the local secondary index, and no
	// fetching is required.
	//
	// * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
	// all attributes that have been projected into the index. If the index is
	// configured to project all attributes, this return value is equivalent
	// to specifying ALL_ATTRIBUTES.
	//
	// * COUNT - Returns the number of matching items, rather than the matching
	// items themselves.
	//
	// * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet.
	// This return value is equivalent to specifying AttributesToGet without
	// specifying any value for Select.
	//
	// If you query or scan a local secondary index and request only attributes
	// that are projected into that index, the operation will read only the index
	// and not the table. If any of the requested attributes are not projected
	// into the local secondary index, DynamoDB will fetch each of these attributes
	// from the parent table. This extra fetching incurs additional throughput
	// cost and latency.
	//
	// If you query or scan a global secondary index, you can only request attributes
	// that are projected into the index. Global secondary index queries cannot
	// fetch attributes from the parent table.
	//
	// If neither Select nor AttributesToGet are specified, DynamoDB defaults to
	// ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
	// accessing an index. You cannot use both Select and AttributesToGet together
	// in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
	// (This usage is equivalent to specifying AttributesToGet without any value
	// for Select.)
	//
	// If you use the ProjectionExpression parameter, then the value for Select
	// can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
	// error.
	Select Select `type:"string" enum:"true"`

	// The name of the table containing the requested items.
	//
	// TableName is a required field
	TableName *string `min:"3" type:"string" required:"true"`
}
+
+// String returns the string representation
+func (s QueryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueryInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "QueryInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("Limit", 1))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+ if s.KeyConditions != nil {
+ for i, v := range s.KeyConditions {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueryFilter != nil {
+ for i, v := range s.QueryFilter {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// QueryOutput represents the output of a Query operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/QueryOutput
type QueryOutput struct {
	_ struct{} `type:"structure"`

	// Response metadata recorded by the SDK; exposed via SDKResponseMetadata.
	responseMetadata aws.Response

	// The capacity units consumed by the Query operation. The data returned includes
	// the total provisioned throughput consumed, along with statistics for the
	// table and any indexes involved in the operation. ConsumedCapacity is only
	// returned if the ReturnConsumedCapacity parameter was specified. For more information,
	// see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
	// in the Amazon DynamoDB Developer Guide.
	ConsumedCapacity *ConsumedCapacity `type:"structure"`

	// The number of items in the response.
	//
	// If you used a QueryFilter in the request, then Count is the number of items
	// returned after the filter was applied, and ScannedCount is the number of
	// matching items before the filter was applied.
	//
	// If you did not use a filter in the request, then Count and ScannedCount are
	// the same.
	Count *int64 `type:"integer"`

	// An array of item attributes that match the query criteria. Each element in
	// this array consists of an attribute name and the value for that attribute.
	Items []map[string]AttributeValue `type:"list"`

	// The primary key of the item where the operation stopped, inclusive of the
	// previous result set. Use this value to start a new operation, excluding this
	// value in the new request.
	//
	// If LastEvaluatedKey is empty, then the "last page" of results has been processed
	// and there is no more data to be retrieved.
	//
	// If LastEvaluatedKey is not empty, it does not necessarily mean that there
	// is more data in the result set. The only way to know when you have reached
	// the end of the result set is when LastEvaluatedKey is empty.
	LastEvaluatedKey map[string]AttributeValue `type:"map"`

	// The number of items evaluated, before any QueryFilter is applied. A high
	// ScannedCount value with few, or no, Count results indicates an inefficient
	// Query operation. For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
	// in the Amazon DynamoDB Developer Guide.
	//
	// If you did not use a filter in the request, then ScannedCount is the same
	// as Count.
	ScannedCount *int64 `type:"integer"`
}
+
+// String returns the string representation
+func (s QueryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryOutput) GoString() string {
+ return s.String()
+}
+
// SDKResponseMetadata returns the response metadata for the API call.
func (s QueryOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Replica represents the properties of a replica.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Replica
type Replica struct {
	_ struct{} `type:"structure"`

	// The region where the replica needs to be created.
	RegionName *string `type:"string"`
}
+
+// String returns the string representation
+func (s Replica) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Replica) GoString() string {
+ return s.String()
+}
+
// ReplicaDescription contains the details of the replica.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaDescription
type ReplicaDescription struct {
	_ struct{} `type:"structure"`

	// The name of the region.
	RegionName *string `type:"string"`
}
+
+// String returns the string representation
+func (s ReplicaDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaDescription) GoString() string {
+ return s.String()
+}
+
// ReplicaGlobalSecondaryIndexSettingsDescription represents the properties
// of a global secondary index.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaGlobalSecondaryIndexSettingsDescription
type ReplicaGlobalSecondaryIndexSettingsDescription struct {
	_ struct{} `type:"structure"`

	// The name of the global secondary index. The name must be unique among all
	// other indexes on this table.
	//
	// IndexName is a required field
	IndexName *string `min:"3" type:"string" required:"true"`

	// The current status of the global secondary index:
	//
	// * CREATING - The global secondary index is being created.
	//
	// * UPDATING - The global secondary index is being updated.
	//
	// * DELETING - The global secondary index is being deleted.
	//
	// * ACTIVE - The global secondary index is ready for use.
	IndexStatus IndexStatus `type:"string" enum:"true"`

	// Autoscaling settings for a global secondary index replica's read capacity
	// units.
	ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`

	// The maximum number of strongly consistent reads consumed per second before
	// DynamoDB returns a ThrottlingException.
	ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`

	// AutoScaling settings for a global secondary index replica's write capacity
	// units.
	ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`

	// The maximum number of writes consumed per second before DynamoDB returns
	// a ThrottlingException.
	ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
}
+
+// String returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string {
+ return s.String()
+}
+
// ReplicaGlobalSecondaryIndexSettingsUpdate represents the settings of a
// global secondary index for a global table that will be modified.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaGlobalSecondaryIndexSettingsUpdate
type ReplicaGlobalSecondaryIndexSettingsUpdate struct {
	_ struct{} `type:"structure"`

	// The name of the global secondary index. The name must be unique among all
	// other indexes on this table.
	//
	// IndexName is a required field
	IndexName *string `min:"3" type:"string" required:"true"`

	// Autoscaling settings for managing a global secondary index replica's read
	// capacity units.
	ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`

	// The maximum number of strongly consistent reads consumed per second before
	// DynamoDB returns a ThrottlingException.
	ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
}
+
+// String returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1))
+ }
+ if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// ReplicaSettingsDescription represents the properties of a replica.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaSettingsDescription
type ReplicaSettingsDescription struct {
	_ struct{} `type:"structure"`

	// The region name of the replica.
	//
	// RegionName is a required field
	RegionName *string `type:"string" required:"true"`

	// Replica global secondary index settings for the global table.
	ReplicaGlobalSecondaryIndexSettings []ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"`

	// Autoscaling settings for a global table replica's read capacity units.
	ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`

	// The maximum number of strongly consistent reads consumed per second before
	// DynamoDB returns a ThrottlingException. For more information, see Specifying
	// Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
	// in the Amazon DynamoDB Developer Guide.
	ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`

	// AutoScaling settings for a global table replica's write capacity units.
	ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`

	// The maximum number of writes consumed per second before DynamoDB returns
	// a ThrottlingException. For more information, see Specifying Read and Write
	// Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
	// in the Amazon DynamoDB Developer Guide.
	ReplicaProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`

	// The current state of the region:
	//
	// * CREATING - The region is being created.
	//
	// * UPDATING - The region is being updated.
	//
	// * DELETING - The region is being deleted.
	//
	// * ACTIVE - The region is ready for use.
	ReplicaStatus ReplicaStatus `type:"string" enum:"true"`
}
+
+// String returns the string representation
+func (s ReplicaSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaSettingsDescription) GoString() string {
+ return s.String()
+}
+
// ReplicaSettingsUpdate represents the settings for a global table in a region
// that will be modified.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaSettingsUpdate
type ReplicaSettingsUpdate struct {
	_ struct{} `type:"structure"`

	// The region of the replica to be added.
	//
	// RegionName is a required field
	RegionName *string `type:"string" required:"true"`

	// Represents the settings of a global secondary index for a global table that
	// will be modified.
	ReplicaGlobalSecondaryIndexSettingsUpdate []ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`

	// Autoscaling settings for managing a global table replica's read capacity
	// units.
	ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`

	// The maximum number of strongly consistent reads consumed per second before
	// DynamoDB returns a ThrottlingException. For more information, see Specifying
	// Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
	// in the Amazon DynamoDB Developer Guide.
	ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
}
+
+// String returns the string representation
+func (s ReplicaSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaSettingsUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ReplicaSettingsUpdate"}
+
+ if s.RegionName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RegionName"))
+ }
+ if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1))
+ }
+ if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1))
+ }
+ if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil {
+ for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents one of the following:
+//
+// * A new replica to be added to an existing global table.
+//
+// * New parameters for an existing replica.
+//
+// * An existing replica to be removed from an existing global table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ReplicaUpdate
+type ReplicaUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The parameters required for creating a replica on an existing global table.
+ Create *CreateReplicaAction `type:"structure"`
+
+ // The name of the existing replica to be removed.
+ Delete *DeleteReplicaAction `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplicaUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaUpdate) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ReplicaUpdate"}
+ if s.Create != nil {
+ if err := s.Create.Validate(); err != nil {
+ invalidParams.AddNested("Create", err.(aws.ErrInvalidParams))
+ }
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains details for the restore.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreSummary
+type RestoreSummary struct {
+ _ struct{} `type:"structure"`
+
+ // Point in time or source backup time.
+ //
+ // RestoreDateTime is a required field
+ RestoreDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+ // Indicates if a restore is in progress or not.
+ //
+ // RestoreInProgress is a required field
+ RestoreInProgress *bool `type:"boolean" required:"true"`
+
+ // ARN of the backup from which the table was restored.
+ SourceBackupArn *string `min:"37" type:"string"`
+
+ // ARN of the source table of the backup that is being restored.
+ SourceTableArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreSummary) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackupInput
+type RestoreTableFromBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+
+ // The name of the new table to which the backup must be restored.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableFromBackupInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "RestoreTableFromBackupInput"}
+
+ if s.BackupArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(aws.NewErrParamMinLen("BackupArn", 37))
+ }
+
+ if s.TargetTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TargetTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackupOutput
+type RestoreTableFromBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The description of the table created from an existing backup.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s RestoreTableFromBackupOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTimeInput
+type RestoreTableToPointInTimeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Time in the past to restore the table to.
+ RestoreDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Name of the source table that is being restored.
+ //
+ // SourceTableName is a required field
+ SourceTableName *string `min:"3" type:"string" required:"true"`
+
+ // The name of the new table to which it must be restored to.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+
+ // Restore the table to the latest possible time. LatestRestorableDateTime is
+ // typically 5 minutes before the current time.
+ UseLatestRestorableTime *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RestoreTableToPointInTimeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableToPointInTimeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableToPointInTimeInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
+
+ if s.SourceTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("SourceTableName"))
+ }
+ if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("SourceTableName", 3))
+ }
+
+ if s.TargetTableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TargetTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTimeOutput
+type RestoreTableToPointInTimeOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreTableToPointInTimeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableToPointInTimeOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s RestoreTableToPointInTimeOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// The description of the server-side encryption status on the specified table.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/SSEDescription
+type SSEDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The KMS master key ARN used for the KMS encryption.
+ KMSMasterKeyArn *string `type:"string"`
+
+ // Server-side encryption type:
+ //
+ // * AES256 - Server-side encryption which uses the AES256 algorithm.
+ //
+ // * KMS - Server-side encryption which uses AWS Key Management Service.
+ SSEType SSEType `type:"string" enum:"true"`
+
+ // The current state of server-side encryption:
+ //
+ // * ENABLING - Server-side encryption is being enabled.
+ //
+ // * ENABLED - Server-side encryption is enabled.
+ //
+ // * DISABLING - Server-side encryption is being disabled.
+ //
+ // * DISABLED - Server-side encryption is disabled.
+ //
+ // * UPDATING - Server-side encryption is being updated.
+ Status SSEStatus `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s SSEDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSEDescription) GoString() string {
+ return s.String()
+}
+
+// Represents the settings used to enable server-side encryption.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/SSESpecification
+type SSESpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether server-side encryption is enabled (true) or disabled (false)
+ // on the table.
+ Enabled *bool `type:"boolean"`
+
+ // The KMS Master Key (CMK) which should be used for the KMS encryption. To
+ // specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or
+ // alias ARN. Note that you should only provide this parameter if the key is
+ // different from the default DynamoDB KMS Master Key alias/aws/dynamodb.
+ KMSMasterKeyId *string `type:"string"`
+
+ // Server-side encryption type:
+ //
+ // * AES256 - Server-side encryption which uses the AES256 algorithm.
+ //
+ // * KMS - Server-side encryption which uses AWS Key Management Service.
+ // (default)
+ SSEType SSEType `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s SSESpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSESpecification) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a Scan operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ScanInput
+type ScanInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see AttributesToGet (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []string `min:"1" type:"list"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see ConditionalOperator (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator ConditionalOperator `type:"string" enum:"true"`
+
+ // A Boolean value that determines the read consistency model during the scan:
+ //
+ // * If ConsistentRead is false, then the data returned from Scan might not
+ // contain the results from other recently completed write operations (PutItem,
+ // UpdateItem or DeleteItem).
+ //
+ // * If ConsistentRead is true, then all of the write operations that completed
+ // before the Scan began are guaranteed to be contained in the Scan response.
+ //
+ // The default setting for ConsistentRead is false.
+ //
+ // The ConsistentRead parameter is not supported on global secondary indexes.
+ // If you scan a global secondary index with ConsistentRead set to true, you
+ // will receive a ValidationException.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The primary key of the first item that this operation will evaluate. Use
+ // the value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number or Binary. No
+ // set data types are allowed.
+ //
+ // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify
+ // the same segment whose previous Scan returned the corresponding value of
+ // LastEvaluatedKey.
+ ExclusiveStartKey map[string]AttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]AttributeValue `type:"map"`
+
+ // A string that contains conditions that DynamoDB applies after the Scan operation,
+ // but before the data is returned to you. Items that do not satisfy the FilterExpression
+ // criteria are not returned.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults)
+ // in the Amazon DynamoDB Developer Guide.
+ FilterExpression *string `type:"string"`
+
+ // The name of a secondary index to scan. This index can be any local secondary
+ // index or global secondary index. Note that if you use the IndexName parameter,
+ // you must also provide TableName.
+ IndexName *string `min:"3" type:"string"`
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while processing
+ // the results, it stops the operation and returns the matching values up to
+ // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
+ // so that you can pick up where you left off. Also, if the processed data set
+ // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
+ // and returns the matching values up to the limit, and a key in LastEvaluatedKey
+ // to apply in a subsequent operation to continue the operation. For more information,
+ // see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // A string that identifies one or more attributes to retrieve from the specified
+ // table or index. These attributes can include scalars, sets, or elements of
+ // a JSON document. The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see ScanFilter (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ScanFilter map[string]Condition `type:"map"`
+
+ // For a parallel Scan request, Segment identifies an individual segment to
+ // be scanned by an application worker.
+ //
+ // Segment IDs are zero-based, so the first segment is always 0. For example,
+ // if you want to use four application threads to scan a table or an index,
+ // then the first thread specifies a Segment value of 0, the second thread specifies
+ // 1, and so on.
+ //
+ // The value of LastEvaluatedKey returned from a parallel Scan request must
+ // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan
+ // operation.
+ //
+ // The value for Segment must be greater than or equal to 0, and less than the
+ // value provided for TotalSegments.
+ //
+ // If you provide Segment, you must also provide TotalSegments.
+ Segment *int64 `type:"integer"`
+
+ // The attributes to be returned in the result. You can retrieve all item attributes,
+ // specific item attributes, the count of matching items, or in the case of
+ // an index, some or all of the attributes projected into the index.
+ //
+ // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
+ // table or index. If you query a local secondary index, then for each matching
+ // item in the index DynamoDB will fetch the entire item from the parent
+ // table. If the index is configured to project all item attributes, then
+ // all of the data can be obtained from the local secondary index, and no
+ // fetching is required.
+ //
+ // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is
+ // configured to project all attributes, this return value is equivalent
+ // to specifying ALL_ATTRIBUTES.
+ //
+ // * COUNT - Returns the number of matching items, rather than the matching
+ // items themselves.
+ //
+ // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet.
+ // This return value is equivalent to specifying AttributesToGet without
+ // specifying any value for Select.
+ //
+ // If you query or scan a local secondary index and request only attributes
+ // that are projected into that index, the operation will read only the index
+ // and not the table. If any of the requested attributes are not projected
+ // into the local secondary index, DynamoDB will fetch each of these attributes
+ // from the parent table. This extra fetching incurs additional throughput
+ // cost and latency.
+ //
+ // If you query or scan a global secondary index, you can only request attributes
+ // that are projected into the index. Global secondary index queries cannot
+ // fetch attributes from the parent table.
+ //
+ // If neither Select nor AttributesToGet are specified, DynamoDB defaults to
+ // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
+ // accessing an index. You cannot use both Select and AttributesToGet together
+ // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
+ // (This usage is equivalent to specifying AttributesToGet without any value
+ // for Select.)
+ //
+ // If you use the ProjectionExpression parameter, then the value for Select
+ // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
+ // error.
+ Select Select `type:"string" enum:"true"`
+
+ // The name of the table containing the requested items; or, if you provide
+ // IndexName, the name of the table to which that index belongs.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // For a parallel Scan request, TotalSegments represents the total number of
+ // segments into which the Scan operation will be divided. The value of TotalSegments
+ // corresponds to the number of application workers that will perform the parallel
+ // scan. For example, if you want to use four application threads to scan a
+ // table or an index, specify a TotalSegments value of 4.
+ //
+ // The value for TotalSegments must be greater than or equal to 1, and less
+ // than or equal to 1000000. If you specify a TotalSegments value of 1, the
+ // Scan operation will be sequential rather than parallel.
+ //
+ // If you specify TotalSegments, you must also specify Segment.
+ TotalSegments *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ScanInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ScanInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ScanInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("Limit", 1))
+ }
+
+ if s.TableName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
+ }
+ if s.TotalSegments != nil && *s.TotalSegments < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("TotalSegments", 1))
+ }
+ if s.ScanFilter != nil {
+ for i, v := range s.ScanFilter {
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(aws.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a Scan operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ScanOutput
+type ScanOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The capacity units consumed by the Scan operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // The number of items in the response.
+ //
+ // If you set ScanFilter in the request, then Count is the number of items returned
+ // after the filter was applied, and ScannedCount is the number of matching
+ // items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count is the same as ScannedCount.
+ Count *int64 `type:"integer"`
+
+ // An array of item attributes that match the scan criteria. Each element in
+ // this array consists of an attribute name and the value for that attribute.
+ Items []map[string]AttributeValue `type:"list"`
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been processed
+ // and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there
+ // is more data in the result set. The only way to know when you have reached
+ // the end of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]AttributeValue `type:"map"`
+
+ // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount
+ // value with few, or no, Count results indicates an inefficient Scan operation.
+ // For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same
+ // as Count.
+ ScannedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ScanOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s ScanOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Contains the details of the table when the backup was created.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/SourceTableDetails
+type SourceTableDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Number of items in the table. Please note this is an approximate value.
+ ItemCount *int64 `type:"long"`
+
+ // Schema of the table.
+ //
+ // KeySchema is a required field
+ KeySchema []KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Read IOPS and Write IOPS on the table when the backup was created.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+
+ // ARN of the table for which backup was created.
+ TableArn *string `type:"string"`
+
+ // Time when the source table was created.
+ //
+ // TableCreationDateTime is a required field
+ TableCreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+ // Unique identifier for the table for which the backup was created.
+ //
+ // TableId is a required field
+ TableId *string `type:"string" required:"true"`
+
+ // The name of the table for which the backup was created.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // Size of the table in bytes. Please note this is an approximate value.
+ TableSizeBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s SourceTableDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceTableDetails) GoString() string {
+ return s.String()
+}
+
+// Contains the details of the features enabled on the table when the backup
+// was created. For example, LSIs, GSIs, streams, TTL.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/SourceTableFeatureDetails
+type SourceTableFeatureDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the GSI properties for the table when the backup was created.
+ // It includes the IndexName, KeySchema, Projection and ProvisionedThroughput
+ // for the GSIs on the table at the time of backup.
+ GlobalSecondaryIndexes []GlobalSecondaryIndexInfo `type:"list"`
+
+ // Represents the LSI properties for the table when the backup was created.
+ // It includes the IndexName, KeySchema and Projection for the LSIs on the table
+ // at the time of backup.
+ LocalSecondaryIndexes []LocalSecondaryIndexInfo `type:"list"`
+
+ // The description of the server-side encryption status on the table when the
+ // backup was created.
+ SSEDescription *SSEDescription `type:"structure"`
+
+ // Stream settings on the table when the backup was created.
+ StreamDescription *StreamSpecification `type:"structure"`
+
+ // Time to Live settings on the table when the backup was created.
+ TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s SourceTableFeatureDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceTableFeatureDetails) GoString() string {
+ return s.String()
+}
+
+// Represents the DynamoDB Streams configuration for a table in DynamoDB.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/StreamSpecification
+type StreamSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether DynamoDB Streams is enabled (true) or disabled (false)
+ // on the table.
+ StreamEnabled *bool `type:"boolean"`
+
+ // When an item in the table is modified, StreamViewType determines what information
+ // is written to the stream for this table. Valid values for StreamViewType
+ // are:
+ //
+ // * KEYS_ONLY - Only the key attributes of the modified item are written
+ // to the stream.
+ //
+ // * NEW_IMAGE - The entire item, as it appears after it was modified, is
+ // written to the stream.
+ //
+ // * OLD_IMAGE - The entire item, as it appeared before it was modified,
+ // is written to the stream.
+ //
+ // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item
+ // are written to the stream.
+ StreamViewType StreamViewType `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s StreamSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StreamSpecification) GoString() string {
+ return s.String()
+}
+
// Represents the properties of a table.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TableDescription
type TableDescription struct {
	_ struct{} `type:"structure"`

	// An array of AttributeDefinition objects. Each of these objects describes
	// one attribute in the table and index key schema.
	//
	// Each AttributeDefinition object in this array is composed of:
	//
	// * AttributeName - The name of the attribute.
	//
	// * AttributeType - The data type for the attribute.
	AttributeDefinitions []AttributeDefinition `type:"list"`

	// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
	// format.
	CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The global secondary indexes, if any, on the table. Each index is scoped
	// to a given partition key value. Each element is composed of:
	//
	// * Backfilling - If true, then the index is currently in the backfilling
	// phase. Backfilling occurs only when a new global secondary index is added
	// to the table; it is the process by which DynamoDB populates the new index
	// with data from the table. (This attribute does not appear for indexes
	// that were created during a CreateTable operation.)
	//
	// * IndexName - The name of the global secondary index.
	//
	// * IndexSizeBytes - The total size of the global secondary index, in bytes.
	// DynamoDB updates this value approximately every six hours. Recent changes
	// might not be reflected in this value.
	//
	// * IndexStatus - The current status of the global secondary index:
	//
	// CREATING - The index is being created.
	//
	// UPDATING - The index is being updated.
	//
	// DELETING - The index is being deleted.
	//
	// ACTIVE - The index is ready for use.
	//
	// * ItemCount - The number of items in the global secondary index. DynamoDB
	// updates this value approximately every six hours. Recent changes might
	// not be reflected in this value.
	//
	// * KeySchema - Specifies the complete index key schema. The attribute names
	// in the key schema must be between 1 and 255 characters (inclusive). The
	// key schema must begin with the same partition key as the table.
	//
	// * Projection - Specifies attributes that are copied (projected) from the
	// table into the index. These are in addition to the primary key attributes
	// and index key attributes, which are automatically projected. Each attribute
	// specification is composed of:
	//
	// ProjectionType - One of the following:
	//
	// KEYS_ONLY - Only the index and primary keys are projected into the index.
	//
	// INCLUDE - Only the specified table attributes are projected into the index.
	// The list of projected attributes are in NonKeyAttributes.
	//
	// ALL - All of the table attributes are projected into the index.
	//
	// NonKeyAttributes - A list of one or more non-key attribute names that are
	// projected into the secondary index. The total count of attributes provided
	// in NonKeyAttributes, summed across all of the secondary indexes, must
	// not exceed 20. If you project the same attribute into two different indexes,
	// this counts as two distinct attributes when determining the total.
	//
	// * ProvisionedThroughput - The provisioned throughput settings for the
	// global secondary index, consisting of read and write capacity units, along
	// with data about increases and decreases.
	//
	// If the table is in the DELETING state, no information about indexes will
	// be returned.
	GlobalSecondaryIndexes []GlobalSecondaryIndexDescription `type:"list"`

	// The number of items in the specified table. DynamoDB updates this value approximately
	// every six hours. Recent changes might not be reflected in this value.
	ItemCount *int64 `type:"long"`

	// The primary key structure for the table. Each KeySchemaElement consists of:
	//
	// * AttributeName - The name of the attribute.
	//
	// * KeyType - The role of the attribute:
	//
	// HASH - partition key
	//
	// RANGE - sort key
	//
	// The partition key of an item is also known as its hash attribute. The term
	// "hash attribute" derives from DynamoDB's usage of an internal hash function
	// to evenly distribute data items across partitions, based on their partition
	// key values.
	//
	// The sort key of an item is also known as its range attribute. The term "range
	// attribute" derives from the way DynamoDB stores items with the same partition
	// key physically close together, in sorted order by the sort key value.
	//
	// For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
	// in the Amazon DynamoDB Developer Guide.
	KeySchema []KeySchemaElement `min:"1" type:"list"`

	// The Amazon Resource Name (ARN) that uniquely identifies the latest stream
	// for this table.
	LatestStreamArn *string `min:"37" type:"string"`

	// A timestamp, in ISO 8601 format, for this stream.
	//
	// Note that LatestStreamLabel is not a unique identifier for the stream, because
	// it is possible that a stream from another table might have the same timestamp.
	// However, the combination of the following three elements is guaranteed to
	// be unique:
	//
	// * the AWS customer ID.
	//
	// * the table name.
	//
	// * the StreamLabel.
	LatestStreamLabel *string `type:"string"`

	// Represents one or more local secondary indexes on the table. Each index is
	// scoped to a given partition key value. Tables with one or more local secondary
	// indexes are subject to an item collection size limit, where the amount of
	// data within a given item collection cannot exceed 10 GB. Each element is
	// composed of:
	//
	// * IndexName - The name of the local secondary index.
	//
	// * KeySchema - Specifies the complete index key schema. The attribute names
	// in the key schema must be between 1 and 255 characters (inclusive). The
	// key schema must begin with the same partition key as the table.
	//
	// * Projection - Specifies attributes that are copied (projected) from the
	// table into the index. These are in addition to the primary key attributes
	// and index key attributes, which are automatically projected. Each attribute
	// specification is composed of:
	//
	// ProjectionType - One of the following:
	//
	// KEYS_ONLY - Only the index and primary keys are projected into the index.
	//
	// INCLUDE - Only the specified table attributes are projected into the index.
	// The list of projected attributes are in NonKeyAttributes.
	//
	// ALL - All of the table attributes are projected into the index.
	//
	// NonKeyAttributes - A list of one or more non-key attribute names that are
	// projected into the secondary index. The total count of attributes provided
	// in NonKeyAttributes, summed across all of the secondary indexes, must
	// not exceed 20. If you project the same attribute into two different indexes,
	// this counts as two distinct attributes when determining the total.
	//
	// * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
	// updates this value approximately every six hours. Recent changes might
	// not be reflected in this value.
	//
	// * ItemCount - Represents the number of items in the index. DynamoDB updates
	// this value approximately every six hours. Recent changes might not be
	// reflected in this value.
	//
	// If the table is in the DELETING state, no information about indexes will
	// be returned.
	LocalSecondaryIndexes []LocalSecondaryIndexDescription `type:"list"`

	// The provisioned throughput settings for the table, consisting of read and
	// write capacity units, along with data about increases and decreases.
	ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`

	// Contains details for the restore.
	RestoreSummary *RestoreSummary `type:"structure"`

	// The description of the server-side encryption status on the specified table.
	SSEDescription *SSEDescription `type:"structure"`

	// The current DynamoDB Streams configuration for the table.
	StreamSpecification *StreamSpecification `type:"structure"`

	// The Amazon Resource Name (ARN) that uniquely identifies the table.
	TableArn *string `type:"string"`

	// Unique identifier for the table for which the backup was created.
	TableId *string `type:"string"`

	// The name of the table.
	TableName *string `min:"3" type:"string"`

	// The total size of the specified table, in bytes. DynamoDB updates this value
	// approximately every six hours. Recent changes might not be reflected in this
	// value.
	TableSizeBytes *int64 `type:"long"`

	// The current state of the table:
	//
	// * CREATING - The table is being created.
	//
	// * UPDATING - The table is being updated.
	//
	// * DELETING - The table is being deleted.
	//
	// * ACTIVE - The table is ready for use.
	TableStatus TableStatus `type:"string" enum:"true"`
}
+
// String returns the string representation.
func (s TableDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s TableDescription) GoString() string {
	return s.String()
}
+
// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to
// a single DynamoDB table.
//
// AWS-assigned tag names and values are automatically assigned the aws: prefix,
// which the user cannot assign. AWS-assigned tag names do not count towards
// the tag limit of 50. User-assigned tag names have the prefix user: in the
// Cost Allocation Report. You cannot backdate the application of a tag.
//
// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
// in the Amazon DynamoDB Developer Guide.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Tag
type Tag struct {
	_ struct{} `type:"structure"`

	// The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only
	// have up to one tag with the same key. If you try to add an existing tag (same
	// key), the existing tag value will be updated to the new value.
	//
	// Key is a required field
	Key *string `min:"1" type:"string" required:"true"`

	// The value of the tag. Tag values are case-sensitive and can be null.
	//
	// Value is a required field
	Value *string `type:"string" required:"true"`
}

// String returns the string representation.
func (s Tag) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Tag) GoString() string {
	return s.String()
}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "Tag"}
+
+ if s.Key == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Key", 1))
+ }
+
+ if s.Value == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResourceInput
type TagResourceInput struct {
	_ struct{} `type:"structure"`

	// Identifies the Amazon DynamoDB resource to which tags should be added. This
	// value is an Amazon Resource Name (ARN).
	//
	// ResourceArn is a required field
	ResourceArn *string `min:"1" type:"string" required:"true"`

	// The tags to be assigned to the Amazon DynamoDB resource.
	//
	// Tags is a required field
	Tags []Tag `type:"list" required:"true"`
}

// String returns the string representation.
func (s TagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s TagResourceInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It returns an aws.ErrInvalidParams aggregating every constraint violation,
// or nil when all constraints are satisfied.
func (s *TagResourceInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"}

	if s.ResourceArn == nil {
		invalidParams.Add(aws.NewErrParamRequired("ResourceArn"))
	}
	if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ResourceArn", 1))
	}

	if s.Tags == nil {
		invalidParams.Add(aws.NewErrParamRequired("Tags"))
	}
	// Validate each tag and record failures under an indexed path, e.g. "Tags[2]".
	if s.Tags != nil {
		for i, v := range s.Tags {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResourceOutput
type TagResourceOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata holds the metadata exposed via SDKResponseMetadata.
	responseMetadata aws.Response
}

// String returns the string representation.
func (s TagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s TagResourceOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API.
func (s TagResourceOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// The description of the Time to Live (TTL) status on the specified table.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TimeToLiveDescription
type TimeToLiveDescription struct {
	_ struct{} `type:"structure"`

	// The name of the Time to Live attribute for items in the table.
	AttributeName *string `min:"1" type:"string"`

	// The Time to Live status for the table.
	TimeToLiveStatus TimeToLiveStatus `type:"string" enum:"true"`
}

// String returns the string representation.
func (s TimeToLiveDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s TimeToLiveDescription) GoString() string {
	return s.String()
}
+
// Represents the settings used to enable or disable Time to Live for the specified
// table.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TimeToLiveSpecification
type TimeToLiveSpecification struct {
	_ struct{} `type:"structure"`

	// The name of the Time to Live attribute used to store the expiration time
	// for items in the table.
	//
	// AttributeName is a required field
	AttributeName *string `min:"1" type:"string" required:"true"`

	// Indicates whether Time To Live is to be enabled (true) or disabled (false)
	// on the table.
	//
	// Enabled is a required field
	Enabled *bool `type:"boolean" required:"true"`
}

// String returns the string representation.
func (s TimeToLiveSpecification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s TimeToLiveSpecification) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It returns an aws.ErrInvalidParams aggregating every constraint violation,
// or nil when all constraints are satisfied.
func (s *TimeToLiveSpecification) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "TimeToLiveSpecification"}

	if s.AttributeName == nil {
		invalidParams.Add(aws.NewErrParamRequired("AttributeName"))
	}
	if s.AttributeName != nil && len(*s.AttributeName) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("AttributeName", 1))
	}

	if s.Enabled == nil {
		invalidParams.Add(aws.NewErrParamRequired("Enabled"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResourceInput
type UntagResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon DynamoDB resource the tags will be removed from. This value is
	// an Amazon Resource Name (ARN).
	//
	// ResourceArn is a required field
	ResourceArn *string `min:"1" type:"string" required:"true"`

	// A list of tag keys. Existing tags of the resource whose keys are members
	// of this list will be removed from the Amazon DynamoDB resource.
	//
	// TagKeys is a required field
	TagKeys []string `type:"list" required:"true"`
}

// String returns the string representation.
func (s UntagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UntagResourceInput) GoString() string {
	return s.String()
}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"}
+
+ if s.ResourceArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("ResourceArn", 1))
+ }
+
+ if s.TagKeys == nil {
+ invalidParams.Add(aws.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResourceOutput
type UntagResourceOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata holds the metadata exposed via SDKResponseMetadata.
	responseMetadata aws.Response
}

// String returns the string representation.
func (s UntagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UntagResourceOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API.
func (s UntagResourceOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackupsInput
type UpdateContinuousBackupsInput struct {
	_ struct{} `type:"structure"`

	// Represents the settings used to enable point in time recovery.
	//
	// PointInTimeRecoverySpecification is a required field
	PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"`

	// The name of the table.
	//
	// TableName is a required field
	TableName *string `min:"3" type:"string" required:"true"`
}

// String returns the string representation.
func (s UpdateContinuousBackupsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateContinuousBackupsInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It returns an aws.ErrInvalidParams aggregating every constraint violation,
// or nil when all constraints are satisfied.
func (s *UpdateContinuousBackupsInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"}

	if s.PointInTimeRecoverySpecification == nil {
		invalidParams.Add(aws.NewErrParamRequired("PointInTimeRecoverySpecification"))
	}

	if s.TableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("TableName"))
	}
	if s.TableName != nil && len(*s.TableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
	}
	// Recurse into the nested specification so its own constraint failures
	// are reported under the "PointInTimeRecoverySpecification" path.
	if s.PointInTimeRecoverySpecification != nil {
		if err := s.PointInTimeRecoverySpecification.Validate(); err != nil {
			invalidParams.AddNested("PointInTimeRecoverySpecification", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackupsOutput
type UpdateContinuousBackupsOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata holds the metadata exposed via SDKResponseMetadata.
	responseMetadata aws.Response

	// Represents the continuous backups and point in time recovery settings on
	// the table.
	ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
}

// String returns the string representation.
func (s UpdateContinuousBackupsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateContinuousBackupsOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API.
func (s UpdateContinuousBackupsOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Represents the new provisioned throughput settings to be applied to a global
// secondary index.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalSecondaryIndexAction
type UpdateGlobalSecondaryIndexAction struct {
	_ struct{} `type:"structure"`

	// The name of the global secondary index to be updated.
	//
	// IndexName is a required field
	IndexName *string `min:"3" type:"string" required:"true"`

	// Represents the provisioned throughput settings for the specified global secondary
	// index.
	//
	// For current minimum and maximum provisioned throughput values, see Limits
	// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
	// in the Amazon DynamoDB Developer Guide.
	//
	// ProvisionedThroughput is a required field
	ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
}

// String returns the string representation.
func (s UpdateGlobalSecondaryIndexAction) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateGlobalSecondaryIndexAction) GoString() string {
	return s.String()
}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"}
+
+ if s.IndexName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(aws.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ProvisionedThroughput"))
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(aws.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableInput
type UpdateGlobalTableInput struct {
	_ struct{} `type:"structure"`

	// The global table name.
	//
	// GlobalTableName is a required field
	GlobalTableName *string `min:"3" type:"string" required:"true"`

	// A list of regions that should be added or removed from the global table.
	//
	// ReplicaUpdates is a required field
	ReplicaUpdates []ReplicaUpdate `type:"list" required:"true"`
}

// String returns the string representation.
func (s UpdateGlobalTableInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateGlobalTableInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It returns an aws.ErrInvalidParams aggregating every constraint violation,
// or nil when all constraints are satisfied.
func (s *UpdateGlobalTableInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateGlobalTableInput"}

	if s.GlobalTableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("GlobalTableName"))
	}
	if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("GlobalTableName", 3))
	}

	if s.ReplicaUpdates == nil {
		invalidParams.Add(aws.NewErrParamRequired("ReplicaUpdates"))
	}
	// Validate each replica update and record failures under an indexed path,
	// e.g. "ReplicaUpdates[0]".
	if s.ReplicaUpdates != nil {
		for i, v := range s.ReplicaUpdates {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableOutput
type UpdateGlobalTableOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata holds the metadata exposed via SDKResponseMetadata.
	responseMetadata aws.Response

	// Contains the details of the global table.
	GlobalTableDescription *GlobalTableDescription `type:"structure"`
}

// String returns the string representation.
func (s UpdateGlobalTableOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateGlobalTableOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API.
func (s UpdateGlobalTableOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettingsInput
type UpdateGlobalTableSettingsInput struct {
	_ struct{} `type:"structure"`

	// Represents the settings of a global secondary index for a global table that
	// will be modified.
	GlobalTableGlobalSecondaryIndexSettingsUpdate []GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`

	// The name of the global table.
	//
	// GlobalTableName is a required field
	GlobalTableName *string `min:"3" type:"string" required:"true"`

	// AutoScaling settings for managing provisioned write capacity for the global
	// table.
	GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`

	// The maximum number of writes consumed per second before DynamoDB returns
	// a ThrottlingException.
	GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`

	// Represents the settings for a global table in a region that will be modified.
	ReplicaSettingsUpdate []ReplicaSettingsUpdate `min:"1" type:"list"`
}

// String returns the string representation.
func (s UpdateGlobalTableSettingsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateGlobalTableSettingsInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It returns an aws.ErrInvalidParams aggregating every constraint violation,
// or nil when all constraints are satisfied.
func (s *UpdateGlobalTableSettingsInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"}
	if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1))
	}

	if s.GlobalTableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("GlobalTableName"))
	}
	if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("GlobalTableName", 3))
	}
	if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 {
		invalidParams.Add(aws.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1))
	}
	if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 {
		invalidParams.Add(aws.NewErrParamMinLen("ReplicaSettingsUpdate", 1))
	}
	// Recurse into list elements and nested structures so their own failures
	// are reported under indexed/nested paths.
	if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil {
		for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
		if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
			invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(aws.ErrInvalidParams))
		}
	}
	if s.ReplicaSettingsUpdate != nil {
		for i, v := range s.ReplicaSettingsUpdate {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(aws.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettingsOutput
type UpdateGlobalTableSettingsOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata holds the metadata exposed via SDKResponseMetadata.
	responseMetadata aws.Response

	// The name of the global table.
	GlobalTableName *string `min:"3" type:"string"`

	// The region specific settings for the global table.
	ReplicaSettings []ReplicaSettingsDescription `type:"list"`
}

// String returns the string representation.
func (s UpdateGlobalTableSettingsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateGlobalTableSettingsOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API.
func (s UpdateGlobalTableSettingsOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
+// Represents the input of an UpdateItem operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItemInput
+type UpdateItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use UpdateExpression instead. For more information,
+ // see AttributeUpdates (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributeUpdates map[string]AttributeValueUpdate `type:"map"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator ConditionalOperator `type:"string" enum:"true"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]AttributeValue `type:"map"`
+
+ // The primary key of the item to be updated. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity ReturnConsumedCapacity `type:"string" enum:"true"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation are returned in the response. If set to NONE
+ // (the default), no statistics are returned.
+ ReturnItemCollectionMetrics ReturnItemCollectionMetrics `type:"string" enum:"true"`
+
+ // Use ReturnValues if you want to get the item attributes as they appear before
+ // or after they are updated. For UpdateItem, the valid values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - Returns all of the attributes of the item, as they appeared
+ // before the UpdateItem operation.
+ //
+ // * UPDATED_OLD - Returns only the updated attributes, as they appeared
+ // before the UpdateItem operation.
+ //
+ // * ALL_NEW - Returns all of the attributes of the item, as they appear
+ // after the UpdateItem operation.
+ //
+ // * UPDATED_NEW - Returns only the updated attributes, as they appear after
+ // the UpdateItem operation.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ //
+ // The values returned are strongly consistent.
+ ReturnValues ReturnValue `type:"string" enum:"true"`
+
+ // The name of the table containing the item to update.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // An expression that defines one or more attributes to be updated, the action
+ // to be performed on them, and new value(s) for them.
+ //
+ // The following action values are available for UpdateExpression.
+ //
+ // * SET - Adds one or more attributes and values to an item. If any of these
+ // attribute already exist, they are replaced by the new values. You can
+ // also use SET to add or subtract from an attribute that is of type Number.
+ // For example: SET myNum = myNum + :val
+ //
+ // SET supports the following functions:
+ //
+ // if_not_exists (path, operand) - if the item does not contain an attribute
+ // at the specified path, then if_not_exists evaluates to operand; otherwise,
+ // it evaluates to path. You can use this function to avoid overwriting an
+ // attribute that may already be present in the item.
+ //
+ // list_append (operand, operand) - evaluates to a list with a new element added
+ // to it. You can append the new element to the start or the end of the list
+ // by reversing the order of the operands.
+ //
+ // These function names are case-sensitive.
+ //
+ // * REMOVE - Removes one or more attributes from an item.
+ //
+ // * ADD - Adds the specified value to the item, if the attribute does not
+ // already exist. If the attribute does exist, then the behavior of ADD depends
+ // on the data type of the attribute:
+ //
+ // If the existing attribute is a number, and if Value is also a number, then
+ // Value is mathematically added to the existing attribute. If Value is a
+ // negative number, then it is subtracted from the existing attribute.
+ //
+ // If you use ADD to increment or decrement a number value for an item that
+ // doesn't exist before the update, DynamoDB uses 0 as the initial value.
+ //
+ // Similarly, if you use ADD for an existing item to increment or decrement
+ // an attribute value that doesn't exist before the update, DynamoDB uses
+ // 0 as the initial value. For example, suppose that the item you want to
+ // update doesn't have an attribute named itemcount, but you decide to ADD
+ // the number 3 to this attribute anyway. DynamoDB will create the itemcount
+ // attribute, set its initial value to 0, and finally add 3 to it. The result
+ // will be a new itemcount attribute in the item, with a value of 3.
+ //
+ // If the existing data type is a set and if Value is also a set, then Value
+ // is added to the existing set. For example, if the attribute value is the
+ // set [1,2], and the ADD action specified [3], then the final attribute
+ // value is [1,2,3]. An error occurs if an ADD action is specified for a
+ // set attribute and the attribute type specified does not match the existing
+ // set type.
+ //
+ // Both sets must have the same primitive data type. For example, if the existing
+ // data type is a set of strings, the Value must also be a set of strings.
+ //
+ // The ADD action only supports Number and set data types. In addition, ADD
+ // can only be used on top-level attributes, not nested attributes.
+ //
+ // * DELETE - Deletes an element from a set.
+ //
+ // If a set of values is specified, then those values are subtracted from the
+ // old set. For example, if the attribute value was the set [a,b,c] and the
+ // DELETE action specifies [a,c], then the final attribute value is [b].
+ // Specifying an empty set is an error.
+ //
+ // The DELETE action only supports set data types. In addition, DELETE can only
+ // be used on top-level attributes, not nested attributes.
+ //
+ // You can have many actions in a single expression, such as the following:
+ // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
+ //
+ // For more information on update expressions, see Modifying Items and Attributes
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
+ // in the Amazon DynamoDB Developer Guide.
+ UpdateExpression *string `type:"string"`
+}
+
// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateItemInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateItemInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It checks only client-side constraints (required fields and minimum
// lengths), accumulating every violation into an aws.ErrInvalidParams that
// is returned; nil is returned when all constraints are satisfied.
func (s *UpdateItemInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateItemInput"}

	// Key is a required field.
	if s.Key == nil {
		invalidParams.Add(aws.NewErrParamRequired("Key"))
	}

	// TableName is required and must be at least 3 characters long.
	if s.TableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("TableName"))
	}
	if s.TableName != nil && len(*s.TableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Represents the output of an UpdateItem operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItemOutput
type UpdateItemOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata carries the raw service response metadata; it is
	// exposed to callers through SDKResponseMetadata.
	responseMetadata aws.Response

	// A map of attribute values as they appear before or after the UpdateItem operation,
	// as determined by the ReturnValues parameter.
	//
	// The Attributes map is only present if ReturnValues was specified as something
	// other than NONE in the request. Each element represents one attribute.
	Attributes map[string]AttributeValue `type:"map"`

	// The capacity units consumed by the UpdateItem operation. The data returned
	// includes the total provisioned throughput consumed, along with statistics
	// for the table and any indexes involved in the operation. ConsumedCapacity
	// is only returned if the ReturnConsumedCapacity parameter was specified. For
	// more information, see Provisioned Throughput (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
	// in the Amazon DynamoDB Developer Guide.
	ConsumedCapacity *ConsumedCapacity `type:"structure"`

	// Information about item collections, if any, that were affected by the UpdateItem
	// operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
	// parameter was specified. If the table does not have any local secondary indexes,
	// this information is not returned in the response.
	//
	// Each ItemCollectionMetrics element consists of:
	//
	//    * ItemCollectionKey - The partition key value of the item collection.
	//    This is the same as the partition key value of the item itself.
	//
	//    * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
	//    This value is a two-element array containing a lower bound and an upper
	//    bound for the estimate. The estimate includes the size of all the items
	//    in the table, plus the size of all attributes projected into all of the
	//    local secondary indexes on that table. Use this estimate to measure whether
	//    a local secondary index is approaching its size limit.
	//
	// The estimate is subject to change over time; therefore, do not rely on the
	// precision or accuracy of the estimate.
	ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
}
+
// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateItemOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateItemOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API call.
func (s UpdateItemOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Represents the input of an UpdateTable operation. TableName is the only
// required field; the remaining fields describe optional modifications.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableInput
type UpdateTableInput struct {
	_ struct{} `type:"structure"`

	// An array of attributes that describe the key schema for the table and indexes.
	// If you are adding a new global secondary index to the table, AttributeDefinitions
	// must include the key element(s) of the new index.
	AttributeDefinitions []AttributeDefinition `type:"list"`

	// An array of one or more global secondary indexes for the table. For each
	// index in the array, you can request one action:
	//
	//    * Create - add a new global secondary index to the table.
	//
	//    * Update - modify the provisioned throughput settings of an existing global
	//    secondary index.
	//
	//    * Delete - remove a global secondary index from the table.
	//
	// For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html)
	// in the Amazon DynamoDB Developer Guide.
	GlobalSecondaryIndexUpdates []GlobalSecondaryIndexUpdate `type:"list"`

	// The new provisioned throughput settings for the specified table or index.
	ProvisionedThroughput *ProvisionedThroughput `type:"structure"`

	// The new server-side encryption settings for the specified table.
	SSESpecification *SSESpecification `type:"structure"`

	// Represents the DynamoDB Streams configuration for the table.
	//
	// You will receive a ResourceInUseException if you attempt to enable a stream
	// on a table that already has a stream, or if you attempt to disable a stream
	// on a table which does not have a stream.
	StreamSpecification *StreamSpecification `type:"structure"`

	// The name of the table to be updated.
	//
	// TableName is a required field
	TableName *string `min:"3" type:"string" required:"true"`
}
+
// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateTableInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateTableInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It checks client-side constraints on this struct and recursively validates
// nested AttributeDefinitions, GlobalSecondaryIndexUpdates, and
// ProvisionedThroughput, accumulating every violation; it returns nil when
// all constraints are satisfied.
func (s *UpdateTableInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateTableInput"}

	// TableName is required and must be at least 3 characters long.
	if s.TableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("TableName"))
	}
	if s.TableName != nil && len(*s.TableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
	}
	// Each nested element is validated and its errors nested under an
	// indexed path such as "AttributeDefinitions[0]".
	if s.AttributeDefinitions != nil {
		for i, v := range s.AttributeDefinitions {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.GlobalSecondaryIndexUpdates != nil {
		for i, v := range s.GlobalSecondaryIndexUpdates {
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(aws.ErrInvalidParams))
			}
		}
	}
	if s.ProvisionedThroughput != nil {
		if err := s.ProvisionedThroughput.Validate(); err != nil {
			invalidParams.AddNested("ProvisionedThroughput", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// Represents the output of an UpdateTable operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableOutput
type UpdateTableOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata carries the raw service response metadata; it is
	// exposed to callers through SDKResponseMetadata.
	responseMetadata aws.Response

	// Represents the properties of the table.
	TableDescription *TableDescription `type:"structure"`
}
+
// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateTableOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateTableOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API call.
func (s UpdateTableOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Represents the input of an UpdateTimeToLive operation. Both TableName and
// TimeToLiveSpecification are required.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLiveInput
type UpdateTimeToLiveInput struct {
	_ struct{} `type:"structure"`

	// The name of the table to be configured.
	//
	// TableName is a required field
	TableName *string `min:"3" type:"string" required:"true"`

	// Represents the settings used to enable or disable Time to Live for the specified
	// table.
	//
	// TimeToLiveSpecification is a required field
	TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateTimeToLiveInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateTimeToLiveInput) GoString() string {
	return s.String()
}
+
// Validate inspects the fields of the type to determine if they are valid.
// It checks client-side constraints (required fields, TableName minimum
// length) and recursively validates TimeToLiveSpecification, accumulating
// every violation; it returns nil when all constraints are satisfied.
func (s *UpdateTimeToLiveInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "UpdateTimeToLiveInput"}

	// TableName is required and must be at least 3 characters long.
	if s.TableName == nil {
		invalidParams.Add(aws.NewErrParamRequired("TableName"))
	}
	if s.TableName != nil && len(*s.TableName) < 3 {
		invalidParams.Add(aws.NewErrParamMinLen("TableName", 3))
	}

	// TimeToLiveSpecification is required and, when present, validated in turn.
	if s.TimeToLiveSpecification == nil {
		invalidParams.Add(aws.NewErrParamRequired("TimeToLiveSpecification"))
	}
	if s.TimeToLiveSpecification != nil {
		if err := s.TimeToLiveSpecification.Validate(); err != nil {
			invalidParams.AddNested("TimeToLiveSpecification", err.(aws.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
+
// UpdateTimeToLiveOutput represents the output of an UpdateTimeToLive operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLiveOutput
type UpdateTimeToLiveOutput struct {
	_ struct{} `type:"structure"`

	// responseMetadata carries the raw service response metadata; it is
	// exposed to callers through SDKResponseMetadata.
	responseMetadata aws.Response

	// Represents the output of an UpdateTimeToLive operation.
	TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s UpdateTimeToLiveOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s UpdateTimeToLiveOutput) GoString() string {
	return s.String()
}

// SDKResponseMetadata returns the response metadata for the API call.
func (s UpdateTimeToLiveOutput) SDKResponseMetadata() aws.Response {
	return s.responseMetadata
}
+
// Represents an operation to perform - either DeleteItem or PutItem. You can
// only request one of these operations, not both, in a single WriteRequest.
// If you do need to perform both of these operations, you will need to provide
// two separate WriteRequest objects.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/WriteRequest
type WriteRequest struct {
	_ struct{} `type:"structure"`

	// A request to perform a DeleteItem operation. Set either this field or
	// PutRequest, never both.
	DeleteRequest *DeleteRequest `type:"structure"`

	// A request to perform a PutItem operation. Set either this field or
	// DeleteRequest, never both.
	PutRequest *PutRequest `type:"structure"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify.
func (s WriteRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s WriteRequest) GoString() string {
	return s.String()
}
+
// AttributeAction enumerates the actions that can be applied to an attribute.
type AttributeAction string

// Enum values for AttributeAction
const (
	AttributeActionAdd    AttributeAction = "ADD"
	AttributeActionPut    AttributeAction = "PUT"
	AttributeActionDelete AttributeAction = "DELETE"
)

// MarshalValue returns the wire representation of the enum value.
func (v AttributeAction) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v AttributeAction) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// BackupStatus enumerates the lifecycle states of a backup.
type BackupStatus string

// Enum values for BackupStatus
const (
	BackupStatusCreating  BackupStatus = "CREATING"
	BackupStatusDeleted   BackupStatus = "DELETED"
	BackupStatusAvailable BackupStatus = "AVAILABLE"
)

// MarshalValue returns the wire representation of the enum value.
func (v BackupStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v BackupStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// BackupType enumerates who created a backup (user- or system-initiated).
type BackupType string

// Enum values for BackupType
const (
	BackupTypeUser   BackupType = "USER"
	BackupTypeSystem BackupType = "SYSTEM"
)

// MarshalValue returns the wire representation of the enum value.
func (v BackupType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v BackupType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// BackupTypeFilter enumerates the backup-type filters accepted when listing
// backups.
type BackupTypeFilter string

// Enum values for BackupTypeFilter
const (
	BackupTypeFilterUser   BackupTypeFilter = "USER"
	BackupTypeFilterSystem BackupTypeFilter = "SYSTEM"
	BackupTypeFilterAll    BackupTypeFilter = "ALL"
)

// MarshalValue returns the wire representation of the enum value.
func (v BackupTypeFilter) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v BackupTypeFilter) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ComparisonOperator enumerates the comparison operators usable in legacy
// conditions.
type ComparisonOperator string

// Enum values for ComparisonOperator
const (
	ComparisonOperatorEq          ComparisonOperator = "EQ"
	ComparisonOperatorNe          ComparisonOperator = "NE"
	ComparisonOperatorIn          ComparisonOperator = "IN"
	ComparisonOperatorLe          ComparisonOperator = "LE"
	ComparisonOperatorLt          ComparisonOperator = "LT"
	ComparisonOperatorGe          ComparisonOperator = "GE"
	ComparisonOperatorGt          ComparisonOperator = "GT"
	ComparisonOperatorBetween     ComparisonOperator = "BETWEEN"
	ComparisonOperatorNotNull     ComparisonOperator = "NOT_NULL"
	ComparisonOperatorNull        ComparisonOperator = "NULL"
	ComparisonOperatorContains    ComparisonOperator = "CONTAINS"
	ComparisonOperatorNotContains ComparisonOperator = "NOT_CONTAINS"
	ComparisonOperatorBeginsWith  ComparisonOperator = "BEGINS_WITH"
)

// MarshalValue returns the wire representation of the enum value.
func (v ComparisonOperator) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ComparisonOperator) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}
+
// ConditionalOperator enumerates how multiple legacy conditions are combined.
type ConditionalOperator string

// Enum values for ConditionalOperator
const (
	ConditionalOperatorAnd ConditionalOperator = "AND"
	ConditionalOperatorOr  ConditionalOperator = "OR"
)

// MarshalValue returns the wire representation of the enum value.
func (v ConditionalOperator) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ConditionalOperator) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ContinuousBackupsStatus enumerates whether continuous backups are enabled.
type ContinuousBackupsStatus string

// Enum values for ContinuousBackupsStatus
const (
	ContinuousBackupsStatusEnabled  ContinuousBackupsStatus = "ENABLED"
	ContinuousBackupsStatusDisabled ContinuousBackupsStatus = "DISABLED"
)

// MarshalValue returns the wire representation of the enum value.
func (v ContinuousBackupsStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ContinuousBackupsStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// GlobalTableStatus enumerates the lifecycle states of a global table.
type GlobalTableStatus string

// Enum values for GlobalTableStatus
const (
	GlobalTableStatusCreating GlobalTableStatus = "CREATING"
	GlobalTableStatusActive   GlobalTableStatus = "ACTIVE"
	GlobalTableStatusDeleting GlobalTableStatus = "DELETING"
	GlobalTableStatusUpdating GlobalTableStatus = "UPDATING"
)

// MarshalValue returns the wire representation of the enum value.
func (v GlobalTableStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v GlobalTableStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// IndexStatus enumerates the lifecycle states of a secondary index.
type IndexStatus string

// Enum values for IndexStatus
const (
	IndexStatusCreating IndexStatus = "CREATING"
	IndexStatusUpdating IndexStatus = "UPDATING"
	IndexStatusDeleting IndexStatus = "DELETING"
	IndexStatusActive   IndexStatus = "ACTIVE"
)

// MarshalValue returns the wire representation of the enum value.
func (v IndexStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v IndexStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// KeyType enumerates the roles a key attribute can play (partition or sort).
type KeyType string

// Enum values for KeyType
const (
	KeyTypeHash  KeyType = "HASH"
	KeyTypeRange KeyType = "RANGE"
)

// MarshalValue returns the wire representation of the enum value.
func (v KeyType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v KeyType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// PointInTimeRecoveryStatus enumerates whether point-in-time recovery is
// enabled.
type PointInTimeRecoveryStatus string

// Enum values for PointInTimeRecoveryStatus
const (
	PointInTimeRecoveryStatusEnabled  PointInTimeRecoveryStatus = "ENABLED"
	PointInTimeRecoveryStatusDisabled PointInTimeRecoveryStatus = "DISABLED"
)

// MarshalValue returns the wire representation of the enum value.
func (v PointInTimeRecoveryStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v PointInTimeRecoveryStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}
+
// ProjectionType enumerates which attributes an index projects.
type ProjectionType string

// Enum values for ProjectionType
const (
	ProjectionTypeAll      ProjectionType = "ALL"
	ProjectionTypeKeysOnly ProjectionType = "KEYS_ONLY"
	ProjectionTypeInclude  ProjectionType = "INCLUDE"
)

// MarshalValue returns the wire representation of the enum value.
func (v ProjectionType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ProjectionType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ReplicaStatus enumerates the lifecycle states of a global-table replica.
type ReplicaStatus string

// Enum values for ReplicaStatus
const (
	ReplicaStatusCreating ReplicaStatus = "CREATING"
	ReplicaStatusUpdating ReplicaStatus = "UPDATING"
	ReplicaStatusDeleting ReplicaStatus = "DELETING"
	ReplicaStatusActive   ReplicaStatus = "ACTIVE"
)

// MarshalValue returns the wire representation of the enum value.
func (v ReplicaStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ReplicaStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ReturnConsumedCapacity controls how much ConsumedCapacity detail a response
// carries:
//
//   - INDEXES: aggregate capacity for the operation, plus per-table and
//     per-index detail. Operations that access no indexes (such as GetItem
//     and BatchGetItem) report capacity for the table(s) only.
//   - TOTAL: only the aggregate capacity for the operation.
//   - NONE: no ConsumedCapacity details are included in the response.
type ReturnConsumedCapacity string

// Enum values for ReturnConsumedCapacity
const (
	ReturnConsumedCapacityIndexes ReturnConsumedCapacity = "INDEXES"
	ReturnConsumedCapacityTotal   ReturnConsumedCapacity = "TOTAL"
	ReturnConsumedCapacityNone    ReturnConsumedCapacity = "NONE"
)

// MarshalValue returns the wire representation of the enum value.
func (v ReturnConsumedCapacity) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ReturnConsumedCapacity) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ReturnItemCollectionMetrics enumerates whether item-collection statistics
// are returned.
type ReturnItemCollectionMetrics string

// Enum values for ReturnItemCollectionMetrics
const (
	ReturnItemCollectionMetricsSize ReturnItemCollectionMetrics = "SIZE"
	ReturnItemCollectionMetricsNone ReturnItemCollectionMetrics = "NONE"
)

// MarshalValue returns the wire representation of the enum value.
func (v ReturnItemCollectionMetrics) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ReturnItemCollectionMetrics) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ReturnValue enumerates which item attributes a write operation returns.
type ReturnValue string

// Enum values for ReturnValue
const (
	ReturnValueNone       ReturnValue = "NONE"
	ReturnValueAllOld     ReturnValue = "ALL_OLD"
	ReturnValueUpdatedOld ReturnValue = "UPDATED_OLD"
	ReturnValueAllNew     ReturnValue = "ALL_NEW"
	ReturnValueUpdatedNew ReturnValue = "UPDATED_NEW"
)

// MarshalValue returns the wire representation of the enum value.
func (v ReturnValue) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ReturnValue) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}
+
// SSEStatus enumerates the server-side-encryption states of a table.
type SSEStatus string

// Enum values for SSEStatus
const (
	SSEStatusEnabling  SSEStatus = "ENABLING"
	SSEStatusEnabled   SSEStatus = "ENABLED"
	SSEStatusDisabling SSEStatus = "DISABLING"
	SSEStatusDisabled  SSEStatus = "DISABLED"
	SSEStatusUpdating  SSEStatus = "UPDATING"
)

// MarshalValue returns the wire representation of the enum value.
func (v SSEStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v SSEStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// SSEType enumerates the server-side-encryption key types.
type SSEType string

// Enum values for SSEType
const (
	SSETypeAes256 SSEType = "AES256"
	SSETypeKms    SSEType = "KMS"
)

// MarshalValue returns the wire representation of the enum value.
func (v SSEType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v SSEType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// ScalarAttributeType enumerates the scalar key attribute types
// (string, number, binary).
type ScalarAttributeType string

// Enum values for ScalarAttributeType
const (
	ScalarAttributeTypeS ScalarAttributeType = "S"
	ScalarAttributeTypeN ScalarAttributeType = "N"
	ScalarAttributeTypeB ScalarAttributeType = "B"
)

// MarshalValue returns the wire representation of the enum value.
func (v ScalarAttributeType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v ScalarAttributeType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// Select enumerates which attributes a Query or Scan returns.
type Select string

// Enum values for Select
const (
	SelectAllAttributes          Select = "ALL_ATTRIBUTES"
	SelectAllProjectedAttributes Select = "ALL_PROJECTED_ATTRIBUTES"
	SelectSpecificAttributes     Select = "SPECIFIC_ATTRIBUTES"
	SelectCount                  Select = "COUNT"
)

// MarshalValue returns the wire representation of the enum value.
func (v Select) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v Select) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// StreamViewType enumerates what item data a stream record captures.
type StreamViewType string

// Enum values for StreamViewType
const (
	StreamViewTypeNewImage        StreamViewType = "NEW_IMAGE"
	StreamViewTypeOldImage        StreamViewType = "OLD_IMAGE"
	StreamViewTypeNewAndOldImages StreamViewType = "NEW_AND_OLD_IMAGES"
	StreamViewTypeKeysOnly        StreamViewType = "KEYS_ONLY"
)

// MarshalValue returns the wire representation of the enum value.
func (v StreamViewType) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v StreamViewType) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// TableStatus enumerates the lifecycle states of a table.
type TableStatus string

// Enum values for TableStatus
const (
	TableStatusCreating TableStatus = "CREATING"
	TableStatusUpdating TableStatus = "UPDATING"
	TableStatusDeleting TableStatus = "DELETING"
	TableStatusActive   TableStatus = "ACTIVE"
)

// MarshalValue returns the wire representation of the enum value.
func (v TableStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v TableStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}

// TimeToLiveStatus enumerates the lifecycle states of the TTL setting.
type TimeToLiveStatus string

// Enum values for TimeToLiveStatus
const (
	TimeToLiveStatusEnabling  TimeToLiveStatus = "ENABLING"
	TimeToLiveStatusDisabling TimeToLiveStatus = "DISABLING"
	TimeToLiveStatusEnabled   TimeToLiveStatus = "ENABLED"
	TimeToLiveStatusDisabled  TimeToLiveStatus = "DISABLED"
)

// MarshalValue returns the wire representation of the enum value.
func (v TimeToLiveStatus) MarshalValue() (string, error) {
	return string(v), nil
}

// MarshalValueBuf writes the wire representation of the enum value into b,
// reusing b's backing storage.
func (v TimeToLiveStatus) MarshalValueBuf(b []byte) ([]byte, error) {
	return append(b[:0], v...), nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/customizations.go
new file mode 100644
index 0000000..6ee6e63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/customizations.go
@@ -0,0 +1,110 @@
+package dynamodb
+
+import (
+ "bytes"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ client "github.com/aws/aws-sdk-go-v2/aws"
+ request "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+)
+
+type retryer struct {
+ client.DefaultRetryer
+}
+
+func (d retryer) RetryRules(r *request.Request) time.Duration {
+ delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50
+ return delay * time.Millisecond
+}
+
+func init() {
+ initClient = func(c *DynamoDB) {
+ if c.Config.Retryer == nil {
+ // Only override the retryer with a custom one if the config
+ // does not already contain a retryer
+ setCustomRetryer(c)
+ }
+
+ c.Handlers.Build.PushBackNamed(disableCompressionHandler)
+ c.Handlers.Unmarshal.PushFrontNamed(validateCRC32Handler)
+ }
+
+ initRequest = func(c *DynamoDB, req *aws.Request) {
+ if c.DisableComputeChecksums {
+ // Checksum validation is off, remove the validator.
+ req.Handlers.Unmarshal.Remove(validateCRC32Handler)
+ }
+ }
+}
+
+func setCustomRetryer(c *DynamoDB) {
+ c.Retryer = retryer{
+ DefaultRetryer: client.DefaultRetryer{
+ NumMaxRetries: 10,
+ },
+ }
+}
+
+func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
+ if length < 0 {
+ length = 0
+ }
+ buf := bytes.NewBuffer(make([]byte, 0, length))
+
+ if _, err = buf.ReadFrom(b); err != nil {
+ return nil, err
+ }
+ if err = b.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+var disableCompressionHandler = aws.NamedHandler{Name: "dynamodb.DisableCompression", Fn: disableCompression}
+
+func disableCompression(r *request.Request) {
+ r.HTTPRequest.Header.Set("Accept-Encoding", "identity")
+}
+
+var validateCRC32Handler = aws.NamedHandler{Name: "dynamodb.ValidateCRC32", Fn: validateCRC32}
+
+func validateCRC32(r *request.Request) {
+ if r.Error != nil {
+ return // already have an error, no need to verify CRC
+ }
+
+ // Try to get CRC from response
+ header := r.HTTPResponse.Header.Get("X-Amz-Crc32")
+ if header == "" {
+ return // No header, skip
+ }
+
+ expected, err := strconv.ParseUint(header, 10, 32)
+ if err != nil {
+ return // Could not determine CRC value, skip
+ }
+
+ buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
+ if err != nil { // failed to read the response body, skip
+ return
+ }
+
+ // Reset body for subsequent reads
+ r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
+
+ // Compute the CRC checksum
+ crc := crc32.ChecksumIEEE(buf.Bytes())
+
+ if crc != uint32(expected) {
+ // CRC does not match, set a retryable error
+ r.Retryable = aws.Bool(true)
+ r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go
new file mode 100644
index 0000000..f3aacbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dynamodb provides the client and types for making API
+// requests to Amazon DynamoDB.
+//
+// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+// and predictable performance with seamless scalability. DynamoDB lets you
+// offload the administrative burdens of operating and scaling a distributed
+// database, so that you don't have to worry about hardware provisioning, setup
+// and configuration, replication, software patching, or cluster scaling.
+//
+// With DynamoDB, you can create database tables that can store and retrieve
+// any amount of data, and serve any level of request traffic. You can scale
+// up or scale down your tables' throughput capacity without downtime or performance
+// degradation, and use the AWS Management Console to monitor resource utilization
+// and performance metrics.
+//
+// DynamoDB automatically spreads the data and traffic for your tables over
+// a sufficient number of servers to handle your throughput and storage requirements,
+// while maintaining consistent and fast performance. All of your data is stored
+// on solid state disks (SSDs) and automatically replicated across multiple
+// Availability Zones in an AWS region, providing built-in high availability
+// and data durability.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service.
+//
+// See dynamodb package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/
+//
+// Using the Client
+//
+// To use Amazon DynamoDB with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon DynamoDB client DynamoDB for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc_custom.go
new file mode 100644
index 0000000..5ebc580
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc_custom.go
@@ -0,0 +1,27 @@
+/*
+AttributeValue Marshaling and Unmarshaling Helpers
+
+Utility helpers to marshal and unmarshal AttributeValue to and
+from Go types can be found in the dynamodbattribute sub package. This package
+// provides specialized functions for the common ways of working with
+AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and
+directly with *AttributeValue. This is helpful for marshaling Go types for API
+operations such as PutItem, and unmarshaling Query and Scan APIs' responses.
+
+See the dynamodbattribute package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/
+
+Expression Builders
+
+The expression package provides utility types and functions to build DynamoDB
+expression for type safe construction of API ExpressionAttributeNames, and
+ExpressionAttribute Values.
+
+The package represents the various DynamoDB Expressions as structs named
+accordingly. For example, ConditionBuilder represents a DynamoDB Condition
+Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on.
+
+See the expression package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/
+*/
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/decode.go
new file mode 100644
index 0000000..4985c9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/decode.go
@@ -0,0 +1,733 @@
+package dynamodbattribute
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+// An Unmarshaler is an interface to provide custom unmarshaling of
+// AttributeValues. Use this to provide custom logic determining
+// how AttributeValues should be unmarshaled.
+// type ExampleUnmarshaler struct {
+// Value int
+// }
+//
+// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+// if av.N == nil {
+// return nil
+// }
+//
+// n, err := strconv.ParseInt(*av.N, 10, 0)
+// if err != nil {
+// return err
+// }
+//
+// u.Value = int(n)
+// return nil
+// }
+type Unmarshaler interface {
+ UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
+// Both generic interface{} and concrete types are valid unmarshal
+// destination types.
+//
+// Unmarshal will allocate maps, slices, and pointers as needed to
+// unmarshal the AttributeValue into the provided type value.
+//
+// When unmarshaling AttributeValues into structs Unmarshal matches
+// the field names of the struct to the AttributeValue Map keys.
+// Initially it will look for exact field name matching, but will
+// fall back to case insensitive matching if no exact match is found.
+//
+// With the exception of omitempty, omitemptyelem, binaryset, numberset
+// and stringset all struct tags used by Marshal are also used by
+// Unmarshal.
+//
+// When decoding AttributeValues to interfaces Unmarshal will use the
+// following types.
+//
+// []byte, AV Binary (B)
+// [][]byte, AV Binary Set (BS)
+// bool, AV Boolean (BOOL)
+// []interface{}, AV List (L)
+// map[string]interface{}, AV Map (M)
+// float64, AV Number (N)
+// Number, AV Number (N) with UseNumber set
+// []float64, AV Number Set (NS)
+// []Number, AV Number Set (NS) with UseNumber set
+// string, AV String (S)
+// []string, AV String Set (SS)
+//
+// If the Decoder option, UseNumber is set numbers will be unmarshaled
+// as Number values instead of float64. Use this to maintain the original
+// string formatting of the number as it was represented in the AttributeValue.
+// In addition provides additional opportunities to parse the number
+// string based on individual use cases.
+//
+// When unmarshaling any error that occurs will halt the unmarshal
+// and return the error.
+//
+// The output value provided must be a non-nil pointer
+func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(av, out)
+}
+
+// UnmarshalMap is an alias for Unmarshal which unmarshals from
+// a map of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalMap(m map[string]dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
+}
+
+// UnmarshalList is an alias for Unmarshal func which unmarshals
+// a slice of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalList(l []dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
+}
+
+// UnmarshalListOfMaps is an alias for Unmarshal func which unmarshals a
+// slice of maps of attribute values.
+//
+// This is useful for when you need to unmarshal the Items from a DynamoDB
+// Query API call.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalListOfMaps(l []map[string]dynamodb.AttributeValue, out interface{}) error {
+ items := make([]dynamodb.AttributeValue, len(l))
+ for i, m := range l {
+ items[i] = dynamodb.AttributeValue{M: m}
+ }
+
+ return UnmarshalList(items, out)
+}
+
+// A Decoder provides unmarshaling AttributeValues to Go value types.
+type Decoder struct {
+ MarshalOptions
+
+ // Instructs the decoder to decode AttributeValue Numbers as
+ // Number type instead of float64 when the destination type
+ // is interface{}. Similar to encoding/json.Number
+ UseNumber bool
+}
+
+// NewDecoder creates a new Decoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
+func NewDecoder(opts ...func(*Decoder)) *Decoder {
+ d := &Decoder{
+ MarshalOptions: MarshalOptions{
+ SupportJSONTags: true,
+ },
+ }
+ for _, o := range opts {
+ o(d)
+ }
+
+ return d
+}
+
+// Decode will unmarshal an AttributeValue into a Go value type. An error
+// will be return if the decoder is unable to unmarshal the AttributeValue
+// to the provide Go value type.
+//
+// The output value provided must be a non-nil pointer
+func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
+ v := reflect.ValueOf(out)
+ if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
+ return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
+ }
+
+ return d.decode(av, v, tag{})
+}
+
+var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
+var byteSliceType = reflect.TypeOf([]byte(nil))
+var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
+var numberType = reflect.TypeOf(Number(""))
+var timeType = reflect.TypeOf(time.Time{})
+
+func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ var u Unmarshaler
+ if av == nil || av.NULL != nil {
+ u, v = indirect(v, true)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(av)
+ }
+ return d.decodeNull(v)
+ }
+
+ u, v = indirect(v, false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(av)
+ }
+
+ switch {
+ case len(av.B) != 0:
+ return d.decodeBinary(av.B, v)
+ case av.BOOL != nil:
+ return d.decodeBool(av.BOOL, v)
+ case len(av.BS) != 0:
+ return d.decodeBinarySet(av.BS, v)
+ case len(av.L) != 0:
+ return d.decodeList(av.L, v)
+ case len(av.M) != 0:
+ return d.decodeMap(av.M, v)
+ case av.N != nil:
+ return d.decodeNumber(av.N, v, fieldTag)
+ case len(av.NS) != 0:
+ return d.decodeNumberSet(av.NS, v)
+ case av.S != nil:
+ return d.decodeString(av.S, v, fieldTag)
+ case len(av.SS) != 0:
+ return d.decodeStringSet(av.SS, v)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
+ if v.Kind() == reflect.Interface {
+ buf := make([]byte, len(b))
+ copy(buf, b)
+ v.Set(reflect.ValueOf(buf))
+ return nil
+ }
+
+ if v.Kind() != reflect.Slice {
+ return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+ }
+
+ if v.Type() == byteSliceType {
+ // Optimization for []byte types
+ if v.IsNil() || v.Cap() < len(b) {
+ v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
+ } else if v.Len() != len(b) {
+ v.SetLen(len(b))
+ }
+ copy(v.Interface().([]byte), b)
+ return nil
+ }
+
+ switch v.Type().Elem().Kind() {
+ case reflect.Uint8:
+ // Fallback to reflection copy for type aliased of []byte type
+ if v.IsNil() || v.Cap() < len(b) {
+ v.Set(reflect.MakeSlice(v.Type(), len(b), len(b)))
+ } else if v.Len() != len(b) {
+ v.SetLen(len(b))
+ }
+ for i := 0; i < len(b); i++ {
+ v.Index(i).SetUint(uint64(b[i]))
+ }
+ default:
+ if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
+ reflect.Copy(v, reflect.ValueOf(b))
+ break
+ }
+ return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Bool, reflect.Interface:
+ v.Set(reflect.ValueOf(*b).Convert(v.Type()))
+ default:
+ return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(bs) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(bs)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ case reflect.Interface:
+ set := make([][]byte, len(bs))
+ for i, b := range bs {
+ if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "binary set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(bs); i++ {
+ v.SetLen(i + 1)
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs})
+ }
+ if err := d.decodeBinary(bs[i], elem); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNumber(n *string, v reflect.Value, fieldTag tag) error {
+ switch v.Kind() {
+ case reflect.Interface:
+ i, err := d.decodeNumberToInterface(n)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(i))
+ return nil
+ case reflect.String:
+ if v.Type() == numberType { // Support Number value type
+ v.Set(reflect.ValueOf(Number(*n)))
+ return nil
+ }
+ v.Set(reflect.ValueOf(*n))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i, err := strconv.ParseInt(*n, 10, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowInt(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetInt(i)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ i, err := strconv.ParseUint(*n, 10, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowUint(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetUint(i)
+ case reflect.Float32, reflect.Float64:
+ i, err := strconv.ParseFloat(*n, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowFloat(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetFloat(i)
+ default:
+ if v.Type().ConvertibleTo(timeType) && fieldTag.AsUnixTime {
+ t, err := decodeUnixTime(*n)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(t).Convert(v.Type()))
+ return nil
+ }
+ return &UnmarshalTypeError{Value: "number", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) {
+ if d.UseNumber {
+ return Number(*n), nil
+ }
+
+ // Default to float64 for all numbers
+ return strconv.ParseFloat(*n, 64)
+}
+
+func (d *Decoder) decodeNumberSet(ns []string, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(ns) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(ns)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ case reflect.Interface:
+ if d.UseNumber {
+ set := make([]Number, len(ns))
+ for i, n := range ns {
+ if err := d.decodeNumber(&n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ } else {
+ set := make([]float64, len(ns))
+ for i, n := range ns {
+ if err := d.decodeNumber(&n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ }
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "number set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(ns); i++ {
+ v.SetLen(i + 1)
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns})
+ }
+ if err := d.decodeNumber(&ns[i], elem, tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeList(avList []dynamodb.AttributeValue, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(avList) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(avList)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ case reflect.Interface:
+ s := make([]interface{}, len(avList))
+ for i, av := range avList {
+ if err := d.decode(&av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(s))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "list", Type: v.Type()}
+ }
+
+ // If v is not a slice, array
+ for i := 0; i < v.Cap() && i < len(avList); i++ {
+ v.SetLen(i + 1)
+ if err := d.decode(&avList[i], v.Index(i), tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(avMap map[string]dynamodb.AttributeValue, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Map:
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ return &UnmarshalTypeError{Value: "map string key", Type: t.Key()}
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+ case reflect.Interface:
+ v.Set(reflect.MakeMap(stringInterfaceMapType))
+ v = v.Elem()
+ default:
+ return &UnmarshalTypeError{Value: "map", Type: v.Type()}
+ }
+
+ if v.Kind() == reflect.Map {
+ for k, av := range avMap {
+ key := reflect.ValueOf(k)
+ elem := reflect.New(v.Type().Elem()).Elem()
+ if err := d.decode(&av, elem, tag{}); err != nil {
+ return err
+ }
+ v.SetMapIndex(key, elem)
+ }
+ } else if v.Kind() == reflect.Struct {
+ fields := unionStructFields(v.Type(), d.MarshalOptions)
+ for k, av := range avMap {
+ if f, ok := fieldByName(fields, k); ok {
+ fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
+ v.Set(reflect.New(v.Type().Elem()))
+ return true // to continue the loop.
+ })
+ if err := d.decode(&av, fv, f.tag); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNull(v reflect.Value) error {
+ if v.IsValid() && v.CanSet() {
+ v.Set(reflect.Zero(v.Type()))
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
+ if fieldTag.AsString {
+ return d.decodeNumber(s, v, fieldTag)
+ }
+
+ // To maintain backwards compatibility with ConvertFrom family of methods which
+ // converted strings to time.Time structs
+ if v.Type().ConvertibleTo(timeType) {
+ t, err := time.Parse(time.RFC3339, *s)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(t).Convert(v.Type()))
+ return nil
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ v.SetString(*s)
+ case reflect.Interface:
+ // Ensure type aliasing is handled properly
+ v.Set(reflect.ValueOf(*s).Convert(v.Type()))
+ default:
+ return &UnmarshalTypeError{Value: "string", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStringSet(ss []string, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(ss) {
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ case reflect.Interface:
+ set := make([]string, len(ss))
+ for i, s := range ss {
+ if err := d.decodeString(&s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(ss); i++ {
+ v.SetLen(i + 1)
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
+ }
+ if err := d.decodeString(&ss[i], elem, tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func decodeUnixTime(n string) (time.Time, error) {
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ return time.Time{}, &UnmarshalError{
+ Err: err, Value: n, Type: timeType,
+ }
+ }
+
+ return time.Unix(v, 0), nil
+}
+
+// indirect will walk a value's interface or pointer value types. Returning
+// the final value or the value a unmarshaler is defined on.
+//
+// Based on the encoding/json type reflect value type indirection in Go Stdlib
+// https://golang.org/src/encoding/json/decode.go indirect func.
+func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+
+ return nil, v
+}
+
+// A Number represents an AttributeValue number literal.
+type Number string
+
+// Float64 attempts to cast the number to a float64, returning
+// the result of the cast, or an error if the cast failed.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 attempts to cast the number to an int64, returning
+// the result of the cast, or an error if the cast failed.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// Uint64 attempts to cast the number to a uint64, returning
+// the result of the cast, or an error if the cast failed.
+func (n Number) Uint64() (uint64, error) {
+ return strconv.ParseUint(string(n), 10, 64)
+}
+
+// String returns the raw number represented as a string
+func (n Number) String() string {
+ return string(n)
+}
+
+type emptyOrigError struct{}
+
+func (e emptyOrigError) OrigErr() error {
+ return nil
+}
+
+// An UnmarshalTypeError is an error type representing an error
+// unmarshaling the AttributeValue's element to a Go value type.
+// Includes details about the AttributeValue type and Go value type.
+type UnmarshalTypeError struct {
+ emptyOrigError
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *UnmarshalTypeError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *UnmarshalTypeError) Code() string {
+ return "UnmarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *UnmarshalTypeError) Message() string {
+ return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a AttributeValue to a Go value type.
+type InvalidUnmarshalError struct {
+ emptyOrigError
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *InvalidUnmarshalError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidUnmarshalError) Code() string {
+ return "InvalidUnmarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidUnmarshalError) Message() string {
+ if e.Type == nil {
+ return "cannot unmarshal to nil value"
+ }
+ if e.Type.Kind() != reflect.Ptr {
+ return "cannot unmarshal to non-pointer value, got " + e.Type.String()
+ }
+ return "cannot unmarshal to nil value, " + e.Type.String()
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a DynamoDB
+// AttributeValue element into a Go type. This is different from UnmarshalTypeError
+// in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+ Err error
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+ return fmt.Sprintf("%s: %s\ncaused by: %v", e.Code(), e.Message(), e.Err)
+}
+
+// OrigErr returns the original error that caused this issue.
+func (e UnmarshalError) OrigErr() error {
+ return e.Err
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *UnmarshalError) Code() string {
+ return "UnmarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *UnmarshalError) Message() string {
+ return fmt.Sprintf("cannot unmarshal %q into %s.",
+ e.Value, e.Type.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/doc.go
new file mode 100644
index 0000000..7a51ac0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/doc.go
@@ -0,0 +1,95 @@
+// Package dynamodbattribute provides marshaling and unmarshaling utilities to
+// convert between Go types and dynamodb.AttributeValues.
+//
+// These utilities allow you to marshal slices, maps, structs, and scalar values
+// to and from dynamodb.AttributeValue. These are useful when marshaling
+// Go value types to dynamodb.AttributeValue for DynamoDB requests, or
+// unmarshaling the dynamodb.AttributeValue back into a Go value type.
+//
+// AttributeValue Marshaling
+//
+// To marshal a Go type to a dynamodb.AttributeValue you can use the Marshal
+// functions in the dynamodbattribute package. There are specialized versions
+// of these functions for collections of AttributeValue, such as maps and lists.
+//
+// The following example uses MarshalMap to convert the Record Go type to a
+// dynamodb.AttributeValue type and use the value to make a PutItem API request.
+//
+// type Record struct {
+// ID string
+// URLs []string
+// }
+//
+// //...
+//
+// r := Record{
+// ID: "ABC123",
+// URLs: []string{
+// "https://example.com/first/link",
+// "https://example.com/second/url",
+// },
+// }
+// av, err := dynamodbattribute.MarshalMap(r)
+// if err != nil {
+// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err))
+// }
+//
+// _, err = svc.PutItem(&dynamodb.PutItemInput{
+// TableName: aws.String(myTableName),
+// Item: av,
+// })
+// if err != nil {
+// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err))
+// }
+//
+// AttributeValue Unmarshaling
+//
+// To unmarshal a dynamodb.AttributeValue to a Go type you can use the Unmarshal
+// functions in the dynamodbattribute package. There are specialized versions
+// of these functions for collections of AttributeValue, such as maps and lists.
+//
+// The following example will unmarshal the DynamoDB's Scan API operation. The
+// Items returned by the operation will be unmarshaled into the slice of Records
+// Go type.
+//
+// type Record struct {
+// ID string
+// URLs []string
+// }
+//
+// //...
+//
+// var records []Record
+//
+// // Use the ScanPages method to perform the scan with pagination. Use
+// // just Scan method to make the API call without pagination.
+// err := svc.ScanPages(&dynamodb.ScanInput{
+// TableName: aws.String(myTableName),
+// }, func(page *dynamodb.ScanOutput, last bool) bool {
+// recs := []Record{}
+//
+// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs)
+// if err != nil {
+// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err))
+// }
+//
+// records = append(records, recs...)
+//
+// return true // keep paging
+// })
+//
+// The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap
+// and ConvertFromList methods have been deprecated. The Marshal and Unmarshal
+// functions should be used instead. The ConvertTo|From marshallers do not
+// support BinarySet, NumberSet, nor StringSet, and will incorrectly marshal
+// binary data fields in structs as base64 strings.
+//
+// The Marshal and Unmarshal functions correct this behavior, and removes
+// the reliance on encoding.json. `json` struct tags are still supported. In
+// addition support for a new struct tag `dynamodbav` was added. Support for
+// the json.Marshaler and json.Unmarshaler interfaces have been removed and
+// replaced with the dynamodbattribute.Marshaler and
+// dynamodbattribute.Unmarshaler interfaces.
+//
+// `time.Time` is marshaled as RFC3339 format.
+package dynamodbattribute
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/encode.go
new file mode 100644
index 0000000..e3f0ec7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/encode.go
@@ -0,0 +1,643 @@
+package dynamodbattribute
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+// An UnixTime provides aliasing of time.Time into a type that when marshaled
+// and unmarshaled with DynamoDB AttributeValues it will be done so as number
+// instead of string in seconds since January 1, 1970 UTC.
+//
+// This type is useful as an alternative to the struct tag `unixtime` when you
+// want to have your time value marshaled as Unix time in seconds instead of
+// the default time.RFC3339.
+//
+// Important to note that zero value time as unixtime is not 0 seconds
+// from January 1, 1970 UTC, but -62135596800. Which is seconds between
+// January 1, 0001 UTC, and January 1, 1970 UTC.
+type UnixTime time.Time
+
+// MarshalDynamoDBAttributeValue implements the Marshaler interface so that
+// the UnixTime can be marshaled from to a DynamoDB AttributeValue number
+// value encoded in the number of seconds since January 1, 1970 UTC.
+func (e UnixTime) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+ t := time.Time(e)
+ s := strconv.FormatInt(t.Unix(), 10)
+ av.N = &s
+
+ return nil
+}
+
+// UnmarshalDynamoDBAttributeValue implements the Unmarshaler interface so that
+// the UnixTime can be unmarshaled from a DynamoDB AttributeValue number representing
+// the number of seconds since January 1, 1970 UTC.
+//
+// If an error parsing the AttributeValue number occurs UnmarshalError will be
+// returned.
+func (e *UnixTime) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+ t, err := decodeUnixTime(aws.StringValue(av.N))
+ if err != nil {
+ return err
+ }
+
+ *e = UnixTime(t)
+ return nil
+}
+
+// A Marshaler is an interface to provide custom marshaling of Go value types
+// to AttributeValues. Use this to provide custom logic determining how a
+// Go Value type should be marshaled.
+//
+// type ExampleMarshaler struct {
+// Value int
+// }
+// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+// n := fmt.Sprintf("%v", m.Value)
+// av.N = &n
+// return nil
+// }
+//
+type Marshaler interface {
+ MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Marshal will serialize the passed in Go value type into a DynamoDB AttributeValue
+// type. This value can be used in DynamoDB API operations to simplify marshaling
+// your Go value types into AttributeValues.
+//
+// Marshal will recursively traverse the passed in value marshaling its
+// contents into a AttributeValue. Marshal supports basic scalars
+// (int,uint,float,bool,string), maps, slices, and structs. Anonymous
+// nested types are flattened based on Go anonymous type visibility.
+//
+// Marshaling slices to AttributeValue will default to a List for all
+// types except for []byte and [][]byte. []byte will be marshaled as
+// Binary data (B), and [][]byte will be marshaled as binary data set
+// (BS).
+//
+// `dynamodbav` struct tag can be used to control how the value will be
+// marshaled into a AttributeValue.
+//
+// // Field is ignored
+// Field int `dynamodbav:"-"`
+//
+// // Field AttributeValue map key "myName"
+// Field int `dynamodbav:"myName"`
+//
+// // Field AttributeValue map key "myName", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:"myName,omitempty"`
+//
+// // Field AttributeValue map key "Field", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:",omitempty"`
+//
+// // Field's elems will be omitted if empty
+// // only valid for slices, and maps.
+// Field []string `dynamodbav:",omitemptyelem"`
+//
+// // Field will be marshaled as a AttributeValue string
+// // only valid for number types, (int,uint,float)
+// Field int `dynamodbav:",string"`
+//
+// // Field will be marshaled as a binary set
+// Field [][]byte `dynamodbav:",binaryset"`
+//
+// // Field will be marshaled as a number set
+// Field []int `dynamodbav:",numberset"`
+//
+// // Field will be marshaled as a string set
+// Field []string `dynamodbav:",stringset"`
+//
+// // Field will be marshaled as Unix time number in seconds.
+// // This tag is only valid with time.Time typed struct fields.
+// // Important to note that zero value time as unixtime is not 0 seconds
+// // from January 1, 1970 UTC, but -62135596800. Which is seconds between
+// // January 1, 0001 UTC, and January 1, 0001 UTC.
+// Field time.Time `dynamodbav:",unixtime"`
+//
+// The omitempty tag is only used during Marshaling and is ignored for
+// Unmarshal. Any zero value or a value when marshaled results in a
+// AttributeValue NULL will be added to AttributeValue Maps during struct
+// marshal. The omitemptyelem tag works the same as omitempty except it
+// applies to maps and slices instead of struct fields, and will not be
+// included in the marshaled AttributeValue Map, List, or Set.
+//
+// For convenience and backwards compatibility with ConvertTo functions
+// json struct tags are supported by the Marshal and Unmarshal. If
+// both json and dynamodbav struct tags are provided the json tag will
+// be ignored in favor of dynamodbav.
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+// - the field is not exported
+// - json or dynamodbav field tag is "-"
+// - json or dynamodbav field tag specifies "omitempty", and is empty.
+//
+// Pointer and interfaces values encode as the value pointed to or contained
+// in the interface. A nil value encodes as the AttributeValue NULL value.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// When marshaling any error that occurs will halt the marshal and return
+// the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+func Marshal(in interface{}) (*dynamodb.AttributeValue, error) {
+ return NewEncoder().Encode(in)
+}
+
+// MarshalMap is an alias for Marshal func which marshals Go value
+// type to a map of AttributeValues.
+//
+// This is useful for DynamoDB APIs such as PutItem.
+func MarshalMap(in interface{}) (map[string]dynamodb.AttributeValue, error) {
+ av, err := NewEncoder().Encode(in)
+ if err != nil || av == nil || av.M == nil {
+ return map[string]dynamodb.AttributeValue{}, err
+ }
+
+ return av.M, nil
+}
+
+// MarshalList is an alias for Marshal func which marshals Go value
+// type to a slice of AttributeValues.
+func MarshalList(in interface{}) ([]dynamodb.AttributeValue, error) {
+ av, err := NewEncoder().Encode(in)
+ if err != nil || av == nil || av.L == nil {
+ return []dynamodb.AttributeValue{}, err
+ }
+
+ return av.L, nil
+}
+
+// A MarshalOptions is a collection of options shared between marshaling
+// and unmarshaling
+type MarshalOptions struct {
+ // States that the encoding/json struct tags should be supported.
+ // if a `dynamodbav` struct tag is also provided the encoding/json
+ // tag will be ignored.
+ //
+ // Enabled by default.
+ SupportJSONTags bool
+
+ // Support other custom struct tag keys, such as `yaml` or `toml`.
+ // Note that values provided with a custom TagKey must also be supported
+ // by the (un)marshalers in this package.
+ TagKey string
+}
+
+// An Encoder provides marshaling Go value types to AttributeValues.
+type Encoder struct {
+ MarshalOptions
+
+ // Empty strings, "", will be marked as NULL AttributeValue types.
+ // Empty strings are not valid values for DynamoDB. Will not apply
+ // to lists, sets, or maps. Use the struct tag `omitemptyelem`
+ // to skip empty (zero) values in lists, sets and maps.
+ //
+ // Enabled by default.
+ NullEmptyString bool
+}
+
+// NewEncoder creates a new Encoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
+func NewEncoder(opts ...func(*Encoder)) *Encoder {
+ e := &Encoder{
+ MarshalOptions: MarshalOptions{
+ SupportJSONTags: true,
+ },
+ NullEmptyString: true,
+ }
+ for _, o := range opts {
+ o(e)
+ }
+
+ return e
+}
+
+// Encode will marshal a Go value type to an AttributeValue. Returning
+// the AttributeValue constructed or error.
+func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
+ av := &dynamodb.AttributeValue{}
+ if err := e.encode(av, reflect.ValueOf(in), tag{}); err != nil {
+ return nil, err
+ }
+
+ return av, nil
+}
+
+func fieldByIndex(v reflect.Value, index []int,
+ OnEmbeddedNilStruct func(*reflect.Value) bool) reflect.Value {
+ fv := v
+ for i, x := range index {
+ if i > 0 {
+ if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
+ if fv.IsNil() && !OnEmbeddedNilStruct(&fv) {
+ break
+ }
+ fv = fv.Elem()
+ }
+ }
+ fv = fv.Field(x)
+ }
+ return fv
+}
+
+func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ // We should check for omitted values first before dereferencing.
+ if fieldTag.OmitEmpty && emptyValue(v) {
+ encodeNull(av)
+ return nil
+ }
+
+ // Handle both pointers and interface conversion into types
+ v = valueElem(v)
+
+ if v.Kind() != reflect.Invalid {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Invalid:
+ encodeNull(av)
+ case reflect.Struct:
+ return e.encodeStruct(av, v, fieldTag)
+ case reflect.Map:
+ return e.encodeMap(av, v, fieldTag)
+ case reflect.Slice, reflect.Array:
+ return e.encodeSlice(av, v, fieldTag)
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ // do nothing for unsupported types
+ default:
+ return e.encodeScalar(av, v, fieldTag)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ // To maintain backwards compatibility with ConvertTo family of methods which
+ // converted time.Time structs to strings
+ if v.Type().ConvertibleTo(timeType) {
+ var t time.Time
+ t = v.Convert(timeType).Interface().(time.Time)
+ if fieldTag.AsUnixTime {
+ return UnixTime(t).MarshalDynamoDBAttributeValue(av)
+ }
+ s := t.Format(time.RFC3339Nano)
+ av.S = &s
+ return nil
+ }
+
+ av.M = map[string]dynamodb.AttributeValue{}
+ fields := unionStructFields(v.Type(), e.MarshalOptions)
+ for _, f := range fields {
+ if f.Name == "" {
+ return &InvalidMarshalError{msg: "map key cannot be empty"}
+ }
+
+ found := true
+ fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
+ found = false
+ return false // to break the loop.
+ })
+ if !found {
+ continue
+ }
+ elem := dynamodb.AttributeValue{}
+ err := e.encode(&elem, fv, f.tag)
+ if err != nil {
+ return err
+ }
+ skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
+ if err != nil {
+ return err
+ } else if skip {
+ continue
+ }
+
+ av.M[f.Name] = elem
+ }
+ if len(av.M) == 0 {
+ encodeNull(av)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ av.M = map[string]dynamodb.AttributeValue{}
+ for _, key := range v.MapKeys() {
+ keyName := fmt.Sprint(key.Interface())
+ if keyName == "" {
+ return &InvalidMarshalError{msg: "map key cannot be empty"}
+ }
+
+ elemVal := v.MapIndex(key)
+ elem := dynamodb.AttributeValue{}
+ err := e.encode(&elem, elemVal, tag{})
+ skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
+ if err != nil {
+ return err
+ } else if skip {
+ continue
+ }
+
+ av.M[keyName] = elem
+ }
+ if len(av.M) == 0 {
+ encodeNull(av)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ switch v.Type().Elem().Kind() {
+ case reflect.Uint8:
+ b := v.Bytes()
+ if len(b) == 0 {
+ encodeNull(av)
+ return nil
+ }
+ av.B = append([]byte{}, b...)
+ default:
+ var elemFn func(dynamodb.AttributeValue) error
+
+ if fieldTag.AsBinSet || v.Type() == byteSliceSlicetype { // Binary Set
+ av.BS = make([][]byte, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.B == nil {
+ return &InvalidMarshalError{msg: "binary set must only contain non-nil byte slices"}
+ }
+ av.BS = append(av.BS, elem.B)
+ return nil
+ }
+ } else if fieldTag.AsNumSet { // Number Set
+ av.NS = make([]string, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.N == nil {
+ return &InvalidMarshalError{msg: "number set must only contain non-nil string numbers"}
+ }
+ av.NS = append(av.NS, *elem.N)
+ return nil
+ }
+ } else if fieldTag.AsStrSet { // String Set
+ av.SS = make([]string, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.S == nil {
+ return &InvalidMarshalError{msg: "string set must only contain non-nil strings"}
+ }
+ av.SS = append(av.SS, *elem.S)
+ return nil
+ }
+ } else { // List
+ av.L = make([]dynamodb.AttributeValue, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ av.L = append(av.L, elem)
+ return nil
+ }
+ }
+
+ if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
+ return err
+ } else if n == 0 {
+ encodeNull(av)
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb.AttributeValue) error) (int, error) {
+ count := 0
+ for i := 0; i < v.Len(); i++ {
+ elem := dynamodb.AttributeValue{}
+ err := e.encode(&elem, v.Index(i), tag{OmitEmpty: fieldTag.OmitEmptyElem})
+ skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
+ if err != nil {
+ return 0, err
+ } else if skip {
+ continue
+ }
+
+ if err := elemFn(elem); err != nil {
+ return 0, err
+ }
+ count++
+ }
+
+ return count, nil
+}
+
+func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ if v.Type() == numberType {
+ s := v.String()
+ if fieldTag.AsString {
+ av.S = &s
+ } else {
+ av.N = &s
+ }
+ return nil
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ av.BOOL = new(bool)
+ *av.BOOL = v.Bool()
+ case reflect.String:
+ if err := e.encodeString(av, v); err != nil {
+ return err
+ }
+ default:
+ // Fallback to encoding numbers, will return invalid type if not supported
+ if err := e.encodeNumber(av, v); err != nil {
+ return err
+ }
+ if fieldTag.AsString && av.NULL == nil && av.N != nil {
+ av.S = av.N
+ av.N = nil
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) error {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+
+ var out string
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out = encodeInt(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ out = encodeUint(v.Uint())
+ case reflect.Float32, reflect.Float64:
+ out = encodeFloat(v.Float())
+ default:
+ return &unsupportedMarshalTypeError{Type: v.Type()}
+ }
+
+ av.N = &out
+
+ return nil
+}
+
+func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) error {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ s := v.String()
+ if len(s) == 0 && e.NullEmptyString {
+ encodeNull(av)
+ } else {
+ av.S = &s
+ }
+ default:
+ return &unsupportedMarshalTypeError{Type: v.Type()}
+ }
+
+ return nil
+}
+
+func encodeInt(i int64) string {
+ return strconv.FormatInt(i, 10)
+}
+func encodeUint(u uint64) string {
+ return strconv.FormatUint(u, 10)
+}
+func encodeFloat(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+}
+func encodeNull(av *dynamodb.AttributeValue) {
+ t := true
+ *av = dynamodb.AttributeValue{NULL: &t}
+}
+
+func valueElem(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr:
+ for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ }
+
+ return v
+}
+
+func emptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func tryMarshaler(av *dynamodb.AttributeValue, v reflect.Value) (bool, error) {
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+
+ if v.Type().NumMethod() == 0 {
+ return false, nil
+ }
+
+ if m, ok := v.Interface().(Marshaler); ok {
+ return true, m.MarshalDynamoDBAttributeValue(av)
+ }
+
+ return false, nil
+}
+
+func keepOrOmitEmpty(omitEmpty bool, av dynamodb.AttributeValue, err error) (bool, error) {
+ if err != nil {
+ if _, ok := err.(*unsupportedMarshalTypeError); ok {
+ return true, nil
+ }
+ return false, err
+ }
+
+ if av.NULL != nil && omitEmpty {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type to an AttributeValue.
+type InvalidMarshalError struct {
+ emptyOrigError
+ msg string
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *InvalidMarshalError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidMarshalError) Code() string {
+ return "InvalidMarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidMarshalError) Message() string {
+ return e.msg
+}
+
+// An unsupportedMarshalTypeError represents a Go value type
+// which cannot be marshaled into an AttributeValue and should
+// be skipped by the marshaler.
+type unsupportedMarshalTypeError struct {
+ emptyOrigError
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *unsupportedMarshalTypeError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *unsupportedMarshalTypeError) Code() string {
+ return "unsupportedMarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *unsupportedMarshalTypeError) Message() string {
+ return "Go value type " + e.Type.String() + " is not supported"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/field.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/field.go
new file mode 100644
index 0000000..f1c74fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/field.go
@@ -0,0 +1,273 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+)
+
+type field struct {
+ tag
+
+ Name string
+ NameFromTag bool
+
+ Index []int
+ Type reflect.Type
+}
+
+func fieldByName(fields []field, name string) (field, bool) {
+ foldExists := false
+ foldField := field{}
+
+ for _, f := range fields {
+ if f.Name == name {
+ return f, true
+ }
+ if !foldExists && strings.EqualFold(f.Name, name) {
+ foldField = f
+ foldExists = true
+ }
+ }
+
+ return foldField, foldExists
+}
+
+func buildField(pIdx []int, i int, sf reflect.StructField, fieldTag tag) field {
+ f := field{
+ Name: sf.Name,
+ Type: sf.Type,
+ tag: fieldTag,
+ }
+ if len(fieldTag.Name) != 0 {
+ f.NameFromTag = true
+ f.Name = fieldTag.Name
+ }
+
+ f.Index = make([]int, len(pIdx)+1)
+ copy(f.Index, pIdx)
+ f.Index[len(pIdx)] = i
+
+ return f
+}
+
+func unionStructFields(t reflect.Type, opts MarshalOptions) []field {
+ fields := enumFields(t, opts)
+
+ sort.Sort(fieldsByName(fields))
+
+ fields = visibleFields(fields)
+
+ return fields
+}
+
+// enumFields will recursively iterate through a structure and its nested
+// anonymous fields.
+//
+// Based on the encoding/json struct field enumeration of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go typeField func.
+func enumFields(t reflect.Type, opts MarshalOptions) []field {
+ // Fields to explore
+ current := []field{}
+ next := []field{{Type: t}}
+
+ // count of queued names
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ visited := map[reflect.Type]struct{}{}
+ fields := []field{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if _, ok := visited[f.Type]; ok {
+ continue
+ }
+ visited[f.Type] = struct{}{}
+
+ for i := 0; i < f.Type.NumField(); i++ {
+ sf := f.Type.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous {
+ // Ignore unexported and non-anonymous fields
+ // unexported but anonymous field may still be used if
+ // the type has exported nested fields
+ continue
+ }
+
+ fieldTag := tag{}
+ fieldTag.parseAVTag(sf.Tag)
+ // Because MarshalOptions.TagKey must be explicitly set, use it
+ // over JSON, which is enabled by default.
+ if opts.TagKey != "" && fieldTag == (tag{}) {
+ fieldTag.parseStructTag(opts.TagKey, sf.Tag)
+ } else if opts.SupportJSONTags && fieldTag == (tag{}) {
+ fieldTag.parseStructTag("json", sf.Tag)
+ }
+
+ if fieldTag.Ignore {
+ continue
+ }
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+
+ structField := buildField(f.Index, i, sf, fieldTag)
+ structField.Type = ft
+
+ if !sf.Anonymous || ft.Kind() != reflect.Struct {
+ fields = append(fields, structField)
+ if count[f.Type] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, structField)
+ }
+ continue
+ }
+
+ // Record new anon struct to explore next round
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, structField)
+ }
+ }
+ }
+ }
+
+ return fields
+}
+
+// visibleFields will return a slice of fields which are visible based on
+// Go's standard visibility rules with the exception of ties being broken
+// by depth and struct tag naming.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go typeField func.
+func visibleFields(fields []field) []field {
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.Name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.Name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(fieldsByIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go dominantField func.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].Index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.Index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.NameFromTag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+// fieldsByName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go fieldsByName type.
+type fieldsByName []field
+
+func (x fieldsByName) Len() int { return len(x) }
+
+func (x fieldsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByName) Less(i, j int) bool {
+ if x[i].Name != x[j].Name {
+ return x[i].Name < x[j].Name
+ }
+ if len(x[i].Index) != len(x[j].Index) {
+ return len(x[i].Index) < len(x[j].Index)
+ }
+ if x[i].NameFromTag != x[j].NameFromTag {
+ return x[i].NameFromTag
+ }
+ return fieldsByIndex(x).Less(i, j)
+}
+
+// fieldsByIndex sorts field by index sequence.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go fieldsByIndex type.
+type fieldsByIndex []field
+
+func (x fieldsByIndex) Len() int { return len(x) }
+
+func (x fieldsByIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByIndex) Less(i, j int) bool {
+ for k, xik := range x[i].Index {
+ if k >= len(x[j].Index) {
+ return false
+ }
+ if xik != x[j].Index[k] {
+ return xik < x[j].Index[k]
+ }
+ }
+ return len(x[i].Index) < len(x[j].Index)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/tag.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/tag.go
new file mode 100644
index 0000000..8b76a7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/dynamodbattribute/tag.go
@@ -0,0 +1,68 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "strings"
+)
+
+type tag struct {
+ Name string
+ Ignore bool
+ OmitEmpty bool
+ OmitEmptyElem bool
+ AsString bool
+ AsBinSet, AsNumSet, AsStrSet bool
+ AsUnixTime bool
+}
+
+func (t *tag) parseAVTag(structTag reflect.StructTag) {
+ tagStr := structTag.Get("dynamodbav")
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+func (t *tag) parseStructTag(tag string, structTag reflect.StructTag) {
+ tagStr := structTag.Get(tag)
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+func (t *tag) parseTagStr(tagStr string) {
+ parts := strings.Split(tagStr, ",")
+ if len(parts) == 0 {
+ return
+ }
+
+ if name := parts[0]; name == "-" {
+ t.Name = ""
+ t.Ignore = true
+ } else {
+ t.Name = name
+ t.Ignore = false
+ }
+
+ for _, opt := range parts[1:] {
+ switch opt {
+ case "omitempty":
+ t.OmitEmpty = true
+ case "omitemptyelem":
+ t.OmitEmptyElem = true
+ case "string":
+ t.AsString = true
+ case "binaryset":
+ t.AsBinSet = true
+ case "numberset":
+ t.AsNumSet = true
+ case "stringset":
+ t.AsStrSet = true
+ case "unixtime":
+ t.AsUnixTime = true
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/errors.go
new file mode 100644
index 0000000..4abbbe6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/errors.go
@@ -0,0 +1,148 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+const (
+
+ // ErrCodeBackupInUseException for service response error code
+ // "BackupInUseException".
+ //
+ // There is another ongoing conflicting backup control plane operation on the
+ // table. The backups is either being created, deleted or restored to a table.
+ ErrCodeBackupInUseException = "BackupInUseException"
+
+ // ErrCodeBackupNotFoundException for service response error code
+ // "BackupNotFoundException".
+ //
+ // Backup not found for the given BackupARN.
+ ErrCodeBackupNotFoundException = "BackupNotFoundException"
+
+ // ErrCodeConditionalCheckFailedException for service response error code
+ // "ConditionalCheckFailedException".
+ //
+ // A condition specified in the operation could not be evaluated.
+ ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException"
+
+ // ErrCodeContinuousBackupsUnavailableException for service response error code
+ // "ContinuousBackupsUnavailableException".
+ //
+ // Backups have not yet been enabled for this table.
+ ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException"
+
+ // ErrCodeGlobalTableAlreadyExistsException for service response error code
+ // "GlobalTableAlreadyExistsException".
+ //
+ // The specified global table already exists.
+ ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException"
+
+ // ErrCodeGlobalTableNotFoundException for service response error code
+ // "GlobalTableNotFoundException".
+ //
+ // The specified global table does not exist.
+ ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException"
+
+ // ErrCodeIndexNotFoundException for service response error code
+ // "IndexNotFoundException".
+ //
+ // The operation tried to access a nonexistent index.
+ ErrCodeIndexNotFoundException = "IndexNotFoundException"
+
+ // ErrCodeInternalServerError for service response error code
+ // "InternalServerError".
+ //
+ // An error occurred on the server side.
+ ErrCodeInternalServerError = "InternalServerError"
+
+ // ErrCodeInvalidRestoreTimeException for service response error code
+ // "InvalidRestoreTimeException".
+ //
+ // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
+ // and LatestRestorableDateTime.
+ ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException"
+
+ // ErrCodeItemCollectionSizeLimitExceededException for service response error code
+ // "ItemCollectionSizeLimitExceededException".
+ //
+ // An item collection is too large. This exception is only returned for tables
+ // that have one or more local secondary indexes.
+ ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException"
+
+ // ErrCodeLimitExceededException for service response error code
+ // "LimitExceededException".
+ //
+ // There is no limit to the number of daily on-demand backups that can be taken.
+ //
+ // Up to 10 simultaneous table operations are allowed per account. These operations
+ // include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
+ // and RestoreTableToPointInTime.
+ //
+ // For tables with secondary indexes, only one of those tables can be in the
+ // CREATING state at any point in time. Do not attempt to create more than one
+ // such table simultaneously.
+ //
+ // The total limit of tables in the ACTIVE state is 250.
+ ErrCodeLimitExceededException = "LimitExceededException"
+
+ // ErrCodePointInTimeRecoveryUnavailableException for service response error code
+ // "PointInTimeRecoveryUnavailableException".
+ //
+ // Point in time recovery has not yet been enabled for this source table.
+ ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException"
+
+ // ErrCodeProvisionedThroughputExceededException for service response error code
+ // "ProvisionedThroughputExceededException".
+ //
+ // Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+ // requests that receive this exception. Your request is eventually successful,
+ // unless your retry queue is too large to finish. Reduce the frequency of requests
+ // and use exponential backoff. For more information, go to Error Retries and
+ // Exponential Backoff (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+ // in the Amazon DynamoDB Developer Guide.
+ ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException"
+
+ // ErrCodeReplicaAlreadyExistsException for service response error code
+ // "ReplicaAlreadyExistsException".
+ //
+ // The specified replica is already part of the global table.
+ ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException"
+
+ // ErrCodeReplicaNotFoundException for service response error code
+ // "ReplicaNotFoundException".
+ //
+ // The specified replica is no longer part of the global table.
+ ErrCodeReplicaNotFoundException = "ReplicaNotFoundException"
+
+ // ErrCodeResourceInUseException for service response error code
+ // "ResourceInUseException".
+ //
+ // The operation conflicts with the resource's availability. For example, you
+ // attempted to recreate an existing table, or tried to delete a table currently
+ // in the CREATING state.
+ ErrCodeResourceInUseException = "ResourceInUseException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The operation tried to access a nonexistent table or index. The resource
+ // might not be specified correctly, or its status might not be ACTIVE.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeTableAlreadyExistsException for service response error code
+ // "TableAlreadyExistsException".
+ //
+ // A target table with the specified name already exists.
+ ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException"
+
+ // ErrCodeTableInUseException for service response error code
+ // "TableInUseException".
+ //
+ // A target table with the specified name is either being created or deleted.
+ ErrCodeTableInUseException = "TableInUseException"
+
+ // ErrCodeTableNotFoundException for service response error code
+ // "TableNotFoundException".
+ //
+ // A source table with the name TableName does not currently exist within the
+ // subscriber's account.
+ ErrCodeTableNotFoundException = "TableNotFoundException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/service.go
new file mode 100644
index 0000000..f278528
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/service.go
@@ -0,0 +1,87 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc"
+)
+
+// DynamoDB provides the API operation methods for making requests to
+// Amazon DynamoDB. See this package's package overview docs
+// for details on the service.
+//
+// DynamoDB methods are safe to use concurrently. It is not safe to
+// modify mutate any of the struct's properties though.
+type DynamoDB struct {
+ *aws.Client
+
+ // Service specific configurations. (codegen: service_specific_config.go)
+
+ // Disables the computation and validation of request and response checksums.
+ DisableComputeChecksums bool
+}
+
+// Used for custom client initialization logic
+var initClient func(*DynamoDB)
+
+// Used for custom request initialization logic
+var initRequest func(*DynamoDB, *aws.Request)
+
+// Service information constants
+const (
+ ServiceName = "dynamodb" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the DynamoDB client with a config.
+//
+// Example:
+// // Create a DynamoDB client from just a config.
+// svc := dynamodb.New(myConfig)
+func New(config aws.Config) *DynamoDB {
+ var signingName string
+ signingRegion := config.Region
+
+ svc := &DynamoDB{
+ Client: aws.NewClient(
+ config,
+ aws.Metadata{
+ ServiceName: ServiceName,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ APIVersion: "2012-08-10",
+ JSONVersion: "1.0",
+ TargetPrefix: "DynamoDB_20120810",
+ },
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a DynamoDB operation and runs any
+// custom request initialization.
+func (c *DynamoDB) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(c, req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/waiters.go
new file mode 100644
index 0000000..3d0429d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/waiters.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// WaitUntilTableExists uses the DynamoDB API operation
+// DescribeTable to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
+ return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...aws.WaiterOption) error {
+ w := aws.Waiter{
+ Name: "WaitUntilTableExists",
+ MaxAttempts: 25,
+ Delay: aws.ConstantWaiterDelay(20 * time.Second),
+ Acceptors: []aws.WaiterAcceptor{
+ {
+ State: aws.SuccessWaiterState,
+ Matcher: aws.PathWaiterMatch, Argument: "Table.TableStatus",
+ Expected: "ACTIVE",
+ },
+ {
+ State: aws.RetryWaiterState,
+ Matcher: aws.ErrorWaiterMatch,
+ Expected: "ResourceNotFoundException",
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []aws.Option) (*aws.Request, error) {
+ var inCpy *DescribeTableInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req := c.DescribeTableRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req.Request, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilTableNotExists uses the DynamoDB API operation
+// DescribeTable to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
+ return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...aws.WaiterOption) error {
+ w := aws.Waiter{
+ Name: "WaitUntilTableNotExists",
+ MaxAttempts: 25,
+ Delay: aws.ConstantWaiterDelay(20 * time.Second),
+ Acceptors: []aws.WaiterAcceptor{
+ {
+ State: aws.SuccessWaiterState,
+ Matcher: aws.ErrorWaiterMatch,
+ Expected: "ResourceNotFoundException",
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []aws.Option) (*aws.Request, error) {
+ var inCpy *DescribeTableInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req := c.DescribeTableRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req.Request, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go
new file mode 100644
index 0000000..fad7e8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api.go
@@ -0,0 +1,1832 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest is a API request type for the AssumeRole API operation.
+type AssumeRoleRequest struct {
+ *aws.Request
+ Input *AssumeRoleInput
+ Copy func(*AssumeRoleInput) AssumeRoleRequest
+}
+
+// Send marshals and sends the AssumeRole API request.
+func (r AssumeRoleRequest) Send() (*AssumeRoleOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*AssumeRoleOutput), nil
+}
+
+// AssumeRoleRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation. For a comparison of
+// AssumeRole with the other APIs that produce temporary credentials, see Requesting
+// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Important: You cannot call AssumeRole by using AWS root account credentials;
+// access is denied. You must use credentials for an IAM user or an IAM role
+// to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the IAM User Guide.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the IAM User Guide.
+//
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+// In this case, the trust policy acts as the only resource-based policy in
+// IAM, and users in the same account as the role do not need explicit permission
+// to assume the role. For more information about trust policies and resource-based
+// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA devices produces.
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req := client.AssumeRoleRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) AssumeRoleRequest {
+ op := &aws.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ output := &AssumeRoleOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return AssumeRoleRequest{Request: req, Input: input, Copy: c.AssumeRoleRequest}
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest is a API request type for the AssumeRoleWithSAML API operation.
+type AssumeRoleWithSAMLRequest struct {
+ *aws.Request
+ Input *AssumeRoleWithSAMLInput
+ Copy func(*AssumeRoleWithSAMLInput) AssumeRoleWithSAMLRequest
+}
+
+// Send marshals and sends the AssumeRoleWithSAML API request.
+func (r AssumeRoleWithSAMLRequest) Send() (*AssumeRoleWithSAMLOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*AssumeRoleWithSAMLOutput), nil
+}
+
+// AssumeRoleWithSAMLRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by the intersection of both the access policy
+// of the role that is being assumed, and the policy that you pass. This means
+// that both policies must grant the permission for the action to be allowed.
+// This gives you a way to further restrict the permissions for the resulting
+// temporary security credentials. You cannot use the passed policy to grant
+// permissions that are in excess of those allowed by the access policy of the
+// role that is being assumed. For more information, see Permissions for AssumeRole,
+// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req := client.AssumeRoleWithSAMLRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) AssumeRoleWithSAMLRequest {
+ op := &aws.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output := &AssumeRoleWithSAMLOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return AssumeRoleWithSAMLRequest{Request: req, Input: input, Copy: c.AssumeRoleWithSAMLRequest}
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest is a API request type for the AssumeRoleWithWebIdentity API operation.
+type AssumeRoleWithWebIdentityRequest struct {
+ *aws.Request
+ Input *AssumeRoleWithWebIdentityInput
+ Copy func(*AssumeRoleWithWebIdentityInput) AssumeRoleWithWebIdentityRequest
+}
+
+// Send marshals and sends the AssumeRoleWithWebIdentity API request.
+func (r AssumeRoleWithWebIdentityRequest) Send() (*AssumeRoleWithWebIdentityOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*AssumeRoleWithWebIdentityOutput), nil
+}
+
+// AssumeRoleWithWebIdentityRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security
+// credentials, and then using those credentials to make a request to AWS.
+//
+//
+// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+// apps that show how to invoke the identity providers, and then how to use
+// the information from these providers to get and use temporary security
+// credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req := client.AssumeRoleWithWebIdentityRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) AssumeRoleWithWebIdentityRequest {
+ op := &aws.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ output := &AssumeRoleWithWebIdentityOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return AssumeRoleWithWebIdentityRequest{Request: req, Input: input, Copy: c.AssumeRoleWithWebIdentityRequest}
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest is a API request type for the DecodeAuthorizationMessage API operation.
+type DecodeAuthorizationMessageRequest struct {
+ *aws.Request
+ Input *DecodeAuthorizationMessageInput
+ Copy func(*DecodeAuthorizationMessageInput) DecodeAuthorizationMessageRequest
+}
+
+// Send marshals and sends the DecodeAuthorizationMessage API request.
+func (r DecodeAuthorizationMessageRequest) Send() (*DecodeAuthorizationMessageOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*DecodeAuthorizationMessageOutput), nil
+}
+
+// DecodeAuthorizationMessageRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req := client.DecodeAuthorizationMessageRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) DecodeAuthorizationMessageRequest {
+ op := &aws.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output := &DecodeAuthorizationMessageOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return DecodeAuthorizationMessageRequest{Request: req, Input: input, Copy: c.DecodeAuthorizationMessageRequest}
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest is an API request type for the GetCallerIdentity API operation.
+type GetCallerIdentityRequest struct {
+ *aws.Request
+ Input *GetCallerIdentityInput
+ Copy func(*GetCallerIdentityInput) GetCallerIdentityRequest
+}
+
+// Send marshals and sends the GetCallerIdentity API request.
+func (r GetCallerIdentityRequest) Send() (*GetCallerIdentityOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*GetCallerIdentityOutput), nil
+}
+
+// GetCallerIdentityRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req := client.GetCallerIdentityRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) GetCallerIdentityRequest {
+ op := &aws.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output := &GetCallerIdentityOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return GetCallerIdentityRequest{Request: req, Input: input, Copy: c.GetCallerIdentityRequest}
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest is an API request type for the GetFederationToken API operation.
+type GetFederationTokenRequest struct {
+ *aws.Request
+ Input *GetFederationTokenInput
+ Copy func(*GetFederationTokenInput) GetFederationTokenRequest
+}
+
+// Send marshals and sends the GetFederationToken API request.
+func (r GetFederationTokenRequest) Send() (*GetFederationTokenOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*GetFederationTokenOutput), nil
+}
+
+// GetFederationTokenRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// * The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken.
+//
+// * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req := client.GetFederationTokenRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) GetFederationTokenRequest {
+ op := &aws.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output := &GetFederationTokenOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return GetFederationTokenRequest{Request: req, Input: input, Copy: c.GetFederationTokenRequest}
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest is an API request type for the GetSessionToken API operation.
+type GetSessionTokenRequest struct {
+ *aws.Request
+ Input *GetSessionTokenInput
+ Copy func(*GetSessionTokenInput) GetSessionTokenRequest
+}
+
+// Send marshals and sends the GetSessionToken API request.
+func (r GetSessionTokenRequest) Send() (*GetSessionTokenOutput, error) {
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Request.Data.(*GetSessionTokenOutput), nil
+}
+
+// GetSessionTokenRequest returns a request value for making API operation for
+// AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req := client.GetSessionTokenRequest(params)
+// resp, err := req.Send()
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) GetSessionTokenRequest {
+ op := &aws.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output := &GetSessionTokenOutput{}
+ req := c.newRequest(op, input, output)
+ output.responseMetadata = aws.Response{Request: req}
+
+ return GetSessionTokenRequest{Request: req, Input: input, Copy: c.GetSessionTokenRequest}
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that is used by third parties when assuming roles in
+ // their customers' accounts. For each role that the third party can assume,
+ // they should instruct their customers to ensure the role's trust policy checks
+ // for the external ID that the third party generated. Each time the third party
+ // assumes the role, they should pass the customer's external ID. The external
+ // ID is useful in order to help third parties bind a role to the customer who
+ // created it. For more information about the external ID, see How to Use an
+ // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format.
+ //
+ // This parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both (the intersection of) the access policy of the role that
+ // is being assumed, and the policy that you pass. This gives you a way to further
+ // restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess
+ // of those allowed by the access policy of the role that is being assumed.
+ // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+ // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role
+ // is assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the
+ // account that owns the role. The role session name is also used in the ARN
+ // of the assumed role principal. This means that subsequent cross-account API
+ // requests using the temporary security credentials will expose the role session
+ // name to the external account in their CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+ invalidParams.Add(aws.NewErrParamMinLen("ExternalId", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Policy", 1))
+ }
+
+ if s.RoleArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20))
+ }
+
+ if s.RoleSessionName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(aws.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(aws.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
+type AssumeRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s AssumeRoleOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
+type AssumeRoleWithSAMLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. Your role session lasts for
+ // the duration that you specify for the DurationSeconds parameter, or until
+ // the time specified in the SAML authentication response's SessionNotOnOrAfter
+ // value, whichever is shorter. You can provide a DurationSeconds value from
+ // 900 seconds (15 minutes) up to the maximum session duration setting for the
+ // role. This setting can have a value from 1 hour to 12 hours. If you specify
+ // a value higher than this setting, the operation fails. For example, if you
+ // specify a session duration of 12 hours, but your administrator set the maximum
+ // session duration to 6 hours, your operation fails. To learn how to view the
+ // maximum value for your role, see View the Maximum Session Duration Setting
+ // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the Using IAM guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Policy", 1))
+ }
+
+ if s.PrincipalArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(aws.NewErrParamMinLen("PrincipalArn", 20))
+ }
+
+ if s.RoleArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20))
+ }
+
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(aws.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(aws.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
+type AssumeRoleWithSAMLOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element
+ // of the SAML assertion.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string `type:"string"`
+
+ // A hash value based on the concatenation of the Issuer response value, the
+ // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used
+ // to uniquely identify a federated user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+ // ) )
+ NameQualifier *string `type:"string"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string `type:"string"`
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient
+ // or persistent.
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+ // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+ // is returned as transient. If the format includes any other prefix, the format
+ // is returned with no modifications.
+ SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s AssumeRoleWithSAMLOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
+type AssumeRoleWithWebIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+ // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The fully qualified host component of the domain name of the identity provider.
+ //
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth
+ // 2.0 access tokens. Do not include URL schemes and port numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string `min:"4" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session. Typically, you pass the name
+ // or identifier that is associated with the user who is using your application.
+ // That way, the temporary security credentials that your application will use
+ // are associated with that user. This session name is included as part of the
+ // ARN and assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+ // the identity provider. Your application must get this token by authenticating
+ // the user who is using your application with a web identity provider before
+ // the application makes an AssumeRoleWithWebIdentity call.
+ //
+ // WebIdentityToken is a required field
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(aws.NewErrParamMinLen("ProviderId", 4))
+ }
+
+ if s.RoleArn == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(aws.NewErrParamMinLen("RoleArn", 20))
+ }
+
+ if s.RoleSessionName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(aws.NewErrParamMinLen("RoleSessionName", 2))
+ }
+
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(aws.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(aws.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
+type AssumeRoleWithWebIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The intended audience (also known as client ID) of the web identity token.
+ // This is traditionally the client identifier issued to the application that
+ // requested the web identity token.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed
+ // in the AssumeRoleWithWebIdentity request.
+ Provider *string `type:"string"`
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with
+ // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+ // the user and the application that acquired the WebIdentityToken (pairwise
+ // identifier). For OpenID Connect ID tokens, this field contains the value
+ // returned by the identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s AssumeRoleWithWebIdentityOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // A unique identifier that contains the role ID and the role session name of
+ // the role that is being assumed. The role ID is generated by AWS when the
+ // role is created.
+ //
+ // AssumedRoleId is a required field
+ AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+ return s.String()
+}
+
+// AWS credentials for API authentication.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
+type Credentials struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date on which the current credentials expire.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The secret access key that can be used to sign requests.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ //
+ // SessionToken is a required field
+ SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
+type DecodeAuthorizationMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The encoded message that was returned with the response.
+ //
+ // EncodedMessage is a required field
+ EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+
+ if s.EncodedMessage == nil {
+ invalidParams.Add(aws.NewErrParamRequired("EncodedMessage"))
+ }
+ if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("EncodedMessage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
+type DecodeAuthorizationMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DecodeAuthorizationMessageOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
+type FederatedUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the federated user that is associated with the credentials.
+ // For more information about ARNs and how to use them in policies, see IAM
+ // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // FederatedUserId is a required field
+ FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
+type GetCallerIdentityInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
+type GetCallerIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string `type:"string"`
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string `min:"20" type:"string"`
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity making the call. The values returned are those listed in the
+ // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s GetCallerIdentityOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
+type GetFederationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+ // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+ // using AWS account (root) credentials are restricted to a maximum of 3600
+ // seconds (one hour). If the specified duration is longer than one hour, the
+ // session obtained by using AWS account (root) credentials defaults to one
+ // hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon
+ // S3 bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // Name is a required field
+ Name *string `min:"2" type:"string" required:"true"`
+
+ // An IAM policy in JSON format that is passed with the GetFederationToken call
+ // and evaluated along with the policy or policies that are attached to the
+ // IAM user whose credentials are used to call GetFederationToken. The passed
+ // policy is used to scope down the permissions that are available to the IAM
+ // user, by allowing only a subset of the permissions that are granted to the
+ // IAM user. The passed policy cannot grant more permissions than those granted
+ // to the IAM user. The final permissions for the federated user are the most
+ // restrictive set based on the intersection of the passed policy and the IAM
+ // user policy.
+ //
+ // If you do not pass a policy, the resulting temporary security credentials
+ // have no effective permissions. The only exception is when the temporary security
+ // credentials are used to access a resource that has a resource-based policy
+ // that specifically allows the federated user to access the resource.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ //
+ // For more information about how permissions work, see Permissions for GetFederationToken
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+ Policy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GetFederationTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900))
+ }
+
+ if s.Name == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 2 {
+ invalidParams.Add(aws.NewErrParamMinLen("Name", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Policy", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
+type GetFederationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // Identifiers for the federated user associated with the credentials (such
+ // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+ // can use the federated user's ARN in your resource-based policies, such as
+ // an Amazon S3 bucket policy.
+ FederatedUser *FederatedUser `type:"structure"`
+
+ // A percentage value indicating the size of the policy in packed form. The
+ // service rejects policies for which the packed size is greater than 100 percent
+ // of the allowed value.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s GetFederationTokenOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+ // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+ // If the duration is longer than one hour, the session for AWS account owners
+ // defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+	// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, and the user does not provide a code when requesting a set of
+ // temporary security credentials, the user will receive an "access denied"
+ // response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(aws.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(aws.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ responseMetadata aws.Response
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s GetSessionTokenOutput) SDKResponseMetadata() aws.Response {
+ return s.responseMetadata
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go
new file mode 100644
index 0000000..488324c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/customizations.go
@@ -0,0 +1,12 @@
+package sts
+
+import request "github.com/aws/aws-sdk-go-v2/aws"
+
+func init() {
+ initRequest = func(c *STS, r *request.Request) {
+ switch r.Operation.Name {
+ case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+ r.Handlers.Sign.Clear() // these operations are unsigned
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
new file mode 100644
index 0000000..a43fa80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
@@ -0,0 +1,72 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To use AWS Security Token Service with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go
new file mode 100644
index 0000000..e24884e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go
new file mode 100644
index 0000000..79775f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/service.go
@@ -0,0 +1,80 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *aws.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*STS)
+
+// Used for custom request initialization logic
+var initRequest func(*STS, *aws.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a config.
+//
+// Example:
+// // Create a STS client from just a config.
+// svc := sts.New(myConfig)
+func New(config aws.Config) *STS {
+ var signingName string
+ signingRegion := config.Region
+
+ svc := &STS{
+ Client: aws.NewClient(
+ config,
+ aws.Metadata{
+ ServiceName: ServiceName,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ APIVersion: "2011-06-15",
+ },
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(c, req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml
new file mode 100644
index 0000000..6f440f1
--- /dev/null
+++ b/vendor/github.com/gorilla/context/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.3
+ - go: 1.4
+ - go: 1.5
+ - go: 1.6
+ - go: 1.7
+ - go: tip
+ allow_failures:
+ - go: tip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/vendor/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
new file mode 100644
index 0000000..08f8669
--- /dev/null
+++ b/vendor/github.com/gorilla/context/README.md
@@ -0,0 +1,10 @@
+context
+=======
+[](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
new file mode 100644
index 0000000..81cb128
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ mutex sync.RWMutex
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+ mutex.Lock()
+ if data[r] == nil {
+ data[r] = make(map[interface{}]interface{})
+ datat[r] = time.Now().Unix()
+ }
+ data[r][key] = val
+ mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+ mutex.RLock()
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
+ mutex.RUnlock()
+ return value
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetOk returns stored value and presence state like multi-value return of map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+ mutex.RLock()
+ if _, ok := data[r]; ok {
+ value, ok := data[r][key]
+ mutex.RUnlock()
+ return value, ok
+ }
+ mutex.RUnlock()
+ return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+ mutex.RLock()
+ if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+ mutex.RLock()
+ context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+ mutex.Lock()
+ if data[r] != nil {
+ delete(data[r], key)
+ }
+ mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+ mutex.Lock()
+ clear(r)
+ mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+ delete(data, r)
+ delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the amount of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used for sanity check: in case context cleaning was not
+// properly set some request data can be kept forever, consuming an increasing
+// amount of memory. In case this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+ mutex.Lock()
+ count := 0
+ if maxAge <= 0 {
+ count = len(data)
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+ } else {
+ min := time.Now().Unix() - int64(maxAge)
+ for r := range data {
+ if datat[r] < min {
+ clear(r)
+ count++
+ }
+ }
+ }
+ mutex.Unlock()
+ return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer Clear(r)
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000..448d1bf
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,88 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+ package foo
+
+ import (
+ "github.com/gorilla/context"
+ )
+
+ type key int
+
+ const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+ context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+ func MyHandler(w http.ResponseWriter, r *http.Request) {
+ // val is "bar".
+ val := context.Get(r, foo.MyKey)
+
+ // returns ("bar", true)
+ val, ok := context.GetOk(r, foo.MyKey)
+ // ...
+ }
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+ type key int
+
+ const mykey key = 0
+
+ // GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType {
+ if rv := context.Get(r, mykey); rv != nil {
+ return rv.(SomeType)
+ }
+ return nil
+ }
+
+ // SetMyKey sets a value for this package in the request values.
+ func SetMyKey(r *http.Request, val SomeType) {
+ context.Set(r, mykey, val)
+ }
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+ context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear()
+so if you are using either of them you don't need to clear the context manually.
+*/
+package context
diff --git a/vendor/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/gorilla/handlers/.travis.yml
new file mode 100644
index 0000000..1ba74af
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.4
+ - go: 1.5
+ - go: 1.6
+ - go: 1.7
+ - go: 1.8
+ - go: tip
+ allow_failures:
+ - go: tip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - go test -v -race ./...
+
diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE
new file mode 100644
index 0000000..66ea3c8
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md
new file mode 100644
index 0000000..4a6895d
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/README.md
@@ -0,0 +1,55 @@
+gorilla/handlers
+================
+[](https://godoc.org/github.com/gorilla/handlers) [](https://travis-ci.org/gorilla/handlers)
+[](https://sourcegraph.com/github.com/gorilla/handlers?badge)
+
+
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's `net/http` package (or any framework supporting `http.Handler`), including:
+
+* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#common).
+* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
+ both Apache and nginx.
+* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
+* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
+ content types.
+* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
+ `map[string]http.Handler`
+* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
+ `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
+ headers when running a Go server behind a HTTP reverse proxy.
+* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple
+ domains (i.e. multiple CNAME aliases).
+* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
+
+Other handlers are documented [on the Gorilla
+website](http://www.gorillatoolkit.org/pkg/handlers).
+
+## Example
+
+A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
+
+```go
+import (
+ "net/http"
+ "github.com/gorilla/handlers"
+)
+
+func main() {
+ r := http.NewServeMux()
+
+ // Only log requests to our admin dashboard to stdout
+ r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
+ r.HandleFunc("/", ShowIndex)
+
+ // Wrap our server with our gzip handler to gzip compress all responses.
+ http.ListenAndServe(":8000", handlers.CompressHandler(r))
+}
+```
+
+## License
+
+BSD licensed. See the included LICENSE file for details.
+
diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go
new file mode 100644
index 0000000..8437fef
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/canonical.go
@@ -0,0 +1,74 @@
+package handlers
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type canonical struct {
+ h http.Handler
+ domain string
+ code int
+}
+
+// CanonicalHost is HTTP middleware that re-directs requests to the canonical
+// domain. It accepts a domain and a status code (e.g. 301 or 302) and
+// re-directs clients to this domain. The existing request path is maintained.
+//
+// Note: If the provided domain is considered invalid by url.Parse or otherwise
+// returns an empty scheme or host, clients are not re-directed.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
+// r.HandleFunc("/route", YourHandler)
+//
+// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
+//
+func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
+ fn := func(h http.Handler) http.Handler {
+ return canonical{h, domain, code}
+ }
+
+ return fn
+}
+
+func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ dest, err := url.Parse(c.domain)
+ if err != nil {
+ // Call the next handler if the provided domain fails to parse.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if dest.Scheme == "" || dest.Host == "" {
+ // Call the next handler if the scheme or host are empty.
+ // Note that url.Parse won't fail in this case.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if !strings.EqualFold(cleanHost(r.Host), dest.Host) {
+ // Re-build the destination URL
+ dest := dest.Scheme + "://" + dest.Host + r.URL.Path
+ if r.URL.RawQuery != "" {
+ dest += "?" + r.URL.RawQuery
+ }
+ http.Redirect(w, r, dest, c.code)
+ return
+ }
+
+ c.h.ServeHTTP(w, r)
+}
+
+// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '.
+// This is backported from Go 1.5 (in response to issue #11206) and attempts to
+// mitigate malformed Host headers that do not match the format in RFC7230.
+func cleanHost(in string) string {
+ if i := strings.IndexAny(in, " /"); i != -1 {
+ return in[:i]
+ }
+ return in
+}
diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go
new file mode 100644
index 0000000..e8345d7
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/compress.go
@@ -0,0 +1,148 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+)
+
+type compressResponseWriter struct {
+ io.Writer
+ http.ResponseWriter
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+}
+
+func (w *compressResponseWriter) WriteHeader(c int) {
+ w.ResponseWriter.Header().Del("Content-Length")
+ w.ResponseWriter.WriteHeader(c)
+}
+
+func (w *compressResponseWriter) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *compressResponseWriter) Write(b []byte) (int, error) {
+ h := w.ResponseWriter.Header()
+ if h.Get("Content-Type") == "" {
+ h.Set("Content-Type", http.DetectContentType(b))
+ }
+ h.Del("Content-Length")
+
+ return w.Writer.Write(b)
+}
+
+type flusher interface {
+ Flush() error
+}
+
+func (w *compressResponseWriter) Flush() {
+ // Flush compressed data if compressor supports it.
+ if f, ok := w.Writer.(flusher); ok {
+ f.Flush()
+ }
+ // Flush HTTP response.
+ if w.Flusher != nil {
+ w.Flusher.Flush()
+ }
+}
+
+// CompressHandler gzip compresses HTTP responses for clients that support it
+// via the 'Accept-Encoding' header.
+//
+// Compressing TLS traffic may leak the page contents to an attacker if the
+// page contains user input: http://security.stackexchange.com/a/102015/12208
+func CompressHandler(h http.Handler) http.Handler {
+ return CompressHandlerLevel(h, gzip.DefaultCompression)
+}
+
+// CompressHandlerLevel gzip compresses HTTP responses with specified compression level
+// for clients that support it via the 'Accept-Encoding' header.
+//
+// The compression level should be gzip.DefaultCompression, gzip.NoCompression,
+// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive.
+// gzip.DefaultCompression is used in case of invalid compression level.
+func CompressHandlerLevel(h http.Handler, level int) http.Handler {
+ if level < gzip.DefaultCompression || level > gzip.BestCompression {
+ level = gzip.DefaultCompression
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ L:
+ for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") {
+ switch strings.TrimSpace(enc) {
+ case "gzip":
+ w.Header().Set("Content-Encoding", "gzip")
+ w.Header().Add("Vary", "Accept-Encoding")
+
+ gw, _ := gzip.NewWriterLevel(w, level)
+ defer gw.Close()
+
+ h, hok := w.(http.Hijacker)
+ if !hok { /* w is not Hijacker... oh well... */
+ h = nil
+ }
+
+ f, fok := w.(http.Flusher)
+ if !fok {
+ f = nil
+ }
+
+ cn, cnok := w.(http.CloseNotifier)
+ if !cnok {
+ cn = nil
+ }
+
+ w = &compressResponseWriter{
+ Writer: gw,
+ ResponseWriter: w,
+ Hijacker: h,
+ Flusher: f,
+ CloseNotifier: cn,
+ }
+
+ break L
+ case "deflate":
+ w.Header().Set("Content-Encoding", "deflate")
+ w.Header().Add("Vary", "Accept-Encoding")
+
+ fw, _ := flate.NewWriter(w, level)
+ defer fw.Close()
+
+ h, hok := w.(http.Hijacker)
+ if !hok { /* w is not Hijacker... oh well... */
+ h = nil
+ }
+
+ f, fok := w.(http.Flusher)
+ if !fok {
+ f = nil
+ }
+
+ cn, cnok := w.(http.CloseNotifier)
+ if !cnok {
+ cn = nil
+ }
+
+ w = &compressResponseWriter{
+ Writer: fw,
+ ResponseWriter: w,
+ Hijacker: h,
+ Flusher: f,
+ CloseNotifier: cn,
+ }
+
+ break L
+ }
+ }
+
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go
new file mode 100644
index 0000000..1acf80d
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/cors.go
@@ -0,0 +1,338 @@
+package handlers
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// CORSOption represents a functional option for configuring the CORS middleware.
+type CORSOption func(*cors) error
+
+type cors struct {
+ h http.Handler
+ allowedHeaders []string
+ allowedMethods []string
+ allowedOrigins []string
+ allowedOriginValidator OriginValidator
+ exposedHeaders []string
+ maxAge int
+ ignoreOptions bool
+ allowCredentials bool
+}
+
+// OriginValidator takes an origin string and returns whether or not that origin is allowed.
+type OriginValidator func(string) bool
+
+var (
+ defaultCorsMethods = []string{"GET", "HEAD", "POST"}
+ defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
+ // (WebKit/Safari v9 sends the Origin header by default in AJAX requests)
+)
+
+const (
+ corsOptionMethod string = "OPTIONS"
+ corsAllowOriginHeader string = "Access-Control-Allow-Origin"
+ corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
+ corsMaxAgeHeader string = "Access-Control-Max-Age"
+ corsAllowMethodsHeader string = "Access-Control-Allow-Methods"
+ corsAllowHeadersHeader string = "Access-Control-Allow-Headers"
+ corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials"
+ corsRequestMethodHeader string = "Access-Control-Request-Method"
+ corsRequestHeadersHeader string = "Access-Control-Request-Headers"
+ corsOriginHeader string = "Origin"
+ corsVaryHeader string = "Vary"
+ corsOriginMatchAll string = "*"
+)
+
+func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ origin := r.Header.Get(corsOriginHeader)
+ if !ch.isOriginAllowed(origin) {
+ if r.Method != corsOptionMethod || ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ }
+
+ return
+ }
+
+ if r.Method == corsOptionMethod {
+ if ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ return
+ }
+
+ if _, ok := r.Header[corsRequestMethodHeader]; !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ method := r.Header.Get(corsRequestMethodHeader)
+ if !ch.isMatch(method, ch.allowedMethods) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",")
+ allowedHeaders := []string{}
+ for _, v := range requestHeaders {
+ canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) {
+ continue
+ }
+
+ if !ch.isMatch(canonicalHeader, ch.allowedHeaders) {
+ w.WriteHeader(http.StatusForbidden)
+ return
+ }
+
+ allowedHeaders = append(allowedHeaders, canonicalHeader)
+ }
+
+ if len(allowedHeaders) > 0 {
+ w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ","))
+ }
+
+ if ch.maxAge > 0 {
+ w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge))
+ }
+
+ if !ch.isMatch(method, defaultCorsMethods) {
+ w.Header().Set(corsAllowMethodsHeader, method)
+ }
+ } else {
+ if len(ch.exposedHeaders) > 0 {
+ w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
+ }
+ }
+
+ if ch.allowCredentials {
+ w.Header().Set(corsAllowCredentialsHeader, "true")
+ }
+
+ if len(ch.allowedOrigins) > 1 {
+ w.Header().Set(corsVaryHeader, corsOriginHeader)
+ }
+
+ returnOrigin := origin
+ if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 {
+ returnOrigin = "*"
+ } else {
+ for _, o := range ch.allowedOrigins {
+ // A configuration of * is different than explicitly setting an allowed
+ // origin. Returning arbitrary origin headers in an access control allow
+ // origin header is unsafe and is not required by any use case.
+ if o == corsOriginMatchAll {
+ returnOrigin = "*"
+ break
+ }
+ }
+ }
+ w.Header().Set(corsAllowOriginHeader, returnOrigin)
+
+ if r.Method == corsOptionMethod {
+ return
+ }
+ ch.h.ServeHTTP(w, r)
+}
+
+// CORS provides Cross-Origin Resource Sharing middleware.
+// Example:
+//
+// import (
+// "net/http"
+//
+// "github.com/gorilla/handlers"
+// "github.com/gorilla/mux"
+// )
+//
+// func main() {
+// r := mux.NewRouter()
+// r.HandleFunc("/users", UserEndpoint)
+// r.HandleFunc("/projects", ProjectEndpoint)
+//
+// // Apply the CORS middleware to our top-level router, with the defaults.
+// http.ListenAndServe(":8000", handlers.CORS()(r))
+// }
+//
+func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ ch := parseCORSOptions(opts...)
+ ch.h = h
+ return ch
+ }
+}
+
+func parseCORSOptions(opts ...CORSOption) *cors {
+ ch := &cors{
+ allowedMethods: defaultCorsMethods,
+ allowedHeaders: defaultCorsHeaders,
+ allowedOrigins: []string{},
+ }
+
+ for _, option := range opts {
+ option(ch)
+ }
+
+ return ch
+}
+
+//
+// Functional options for configuring CORS.
+//
+
+// AllowedHeaders adds the provided headers to the list of allowed headers in a
+// CORS request.
+// This is an append operation so the headers Accept, Accept-Language,
+// and Content-Language are always allowed.
+// Content-Type must be explicitly declared if accepting Content-Types other than
+// application/x-www-form-urlencoded, multipart/form-data, or text/plain.
+func AllowedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.allowedHeaders) {
+ ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedMethods can be used to explicitly allow methods in the
+// Access-Control-Allow-Methods header.
+// This is a replacement operation so you must also
+// pass GET, HEAD, and POST if you wish to support those methods.
+func AllowedMethods(methods []string) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedMethods = []string{}
+ for _, v := range methods {
+ normalizedMethod := strings.ToUpper(strings.TrimSpace(v))
+ if normalizedMethod == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedMethod, ch.allowedMethods) {
+ ch.allowedMethods = append(ch.allowedMethods, normalizedMethod)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedOrigins sets the allowed origins for CORS requests, as used in the
+// 'Access-Control-Allow-Origin' HTTP header.
+// Note: Passing in a []string{"*"} will allow any domain.
+func AllowedOrigins(origins []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range origins {
+ if v == corsOriginMatchAll {
+ ch.allowedOrigins = []string{corsOriginMatchAll}
+ return nil
+ }
+ }
+
+ ch.allowedOrigins = origins
+ return nil
+ }
+}
+
+// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the
+// 'Access-Control-Allow-Origin' HTTP header.
+func AllowedOriginValidator(fn OriginValidator) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedOriginValidator = fn
+ return nil
+ }
+}
+
+// ExposedHeaders can be used to specify headers that are available
+// and will not be stripped out by the user-agent.
+func ExposedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ ch.exposedHeaders = []string{}
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.exposedHeaders) {
+ ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// MaxAge determines the maximum age (in seconds) between preflight requests. A
+// maximum of 10 minutes is allowed. An age above this value will default to 10
+// minutes.
+func MaxAge(age int) CORSOption {
+ return func(ch *cors) error {
+ // Maximum of 10 minutes.
+ if age > 600 {
+ age = 600
+ }
+
+ ch.maxAge = age
+ return nil
+ }
+}
+
+// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead
+// passing them through to the next handler. This is useful when your application
+// or framework has a pre-existing mechanism for responding to OPTIONS requests.
+func IgnoreOptions() CORSOption {
+ return func(ch *cors) error {
+ ch.ignoreOptions = true
+ return nil
+ }
+}
+
+// AllowCredentials can be used to specify that the user agent may pass
+// authentication details along with the request.
+func AllowCredentials() CORSOption {
+ return func(ch *cors) error {
+ ch.allowCredentials = true
+ return nil
+ }
+}
+
+func (ch *cors) isOriginAllowed(origin string) bool {
+ if origin == "" {
+ return false
+ }
+
+ if ch.allowedOriginValidator != nil {
+ return ch.allowedOriginValidator(origin)
+ }
+
+ if len(ch.allowedOrigins) == 0 {
+ return true
+ }
+
+ for _, allowedOrigin := range ch.allowedOrigins {
+ if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ch *cors) isMatch(needle string, haystack []string) bool {
+ for _, v := range haystack {
+ if v == needle {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go
new file mode 100644
index 0000000..944e5a8
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/doc.go
@@ -0,0 +1,9 @@
+/*
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's net/http package (or any framework supporting http.Handler).
+
+The package includes handlers for logging in standardised formats, compressing
+HTTP responses, validating content types and other useful tools for manipulating
+requests and responses.
+*/
+package handlers
diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go
new file mode 100644
index 0000000..d03f2bf
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/handlers.go
@@ -0,0 +1,174 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// MethodHandler is an http.Handler that dispatches to a handler whose key in the
+// MethodHandler's map matches the name of the HTTP request's method, eg: GET
+//
+// If the request's method is OPTIONS and OPTIONS is not a key in the map then
+// the handler responds with a status of 200 and sets the Allow header to a
+// comma-separated list of available methods.
+//
+// If the request's method doesn't match any of its keys the handler responds
+// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a
+// comma-separated list of available methods.
+type MethodHandler map[string]http.Handler
+
+func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if handler, ok := h[req.Method]; ok {
+ handler.ServeHTTP(w, req)
+ } else {
+ allow := []string{}
+ for k := range h {
+ allow = append(allow, k)
+ }
+ sort.Strings(allow)
+ w.Header().Set("Allow", strings.Join(allow, ", "))
+ if req.Method == "OPTIONS" {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ }
+ }
+}
+
+// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
+// status code and body size
+type responseLogger struct {
+ w http.ResponseWriter
+ status int
+ size int
+}
+
+func (l *responseLogger) Header() http.Header {
+ return l.w.Header()
+}
+
+func (l *responseLogger) Write(b []byte) (int, error) {
+ size, err := l.w.Write(b)
+ l.size += size
+ return size, err
+}
+
+func (l *responseLogger) WriteHeader(s int) {
+ l.w.WriteHeader(s)
+ l.status = s
+}
+
+func (l *responseLogger) Status() int {
+ return l.status
+}
+
+func (l *responseLogger) Size() int {
+ return l.size
+}
+
+func (l *responseLogger) Flush() {
+ f, ok := l.w.(http.Flusher)
+ if ok {
+ f.Flush()
+ }
+}
+
+type hijackLogger struct {
+ responseLogger
+}
+
+func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ h := l.responseLogger.w.(http.Hijacker)
+ conn, rw, err := h.Hijack()
+ if err == nil && l.responseLogger.status == 0 {
+ // The status will be StatusSwitchingProtocols if there was no error and
+ // WriteHeader has not been called yet
+ l.responseLogger.status = http.StatusSwitchingProtocols
+ }
+ return conn, rw, err
+}
+
+type closeNotifyWriter struct {
+ loggingResponseWriter
+ http.CloseNotifier
+}
+
+type hijackCloseNotifier struct {
+ loggingResponseWriter
+ http.Hijacker
+ http.CloseNotifier
+}
+
+// isContentType validates the Content-Type header matches the supplied
+// contentType. That is, its type and subtype match.
+func isContentType(h http.Header, contentType string) bool {
+ ct := h.Get("Content-Type")
+ if i := strings.IndexRune(ct, ';'); i != -1 {
+ ct = ct[0:i]
+ }
+ return ct == contentType
+}
+
+// ContentTypeHandler wraps and returns a http.Handler, validating the request
+// content type is compatible with the contentTypes list. It writes a HTTP 415
+// error if that fails.
+//
+// Only PUT, POST, and PATCH requests are considered.
+func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ for _, ct := range contentTypes {
+ if isContentType(r.Header, ct) {
+ h.ServeHTTP(w, r)
+ return
+ }
+ }
+ http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType)
+ })
+}
+
+const (
+ // HTTPMethodOverrideHeader is a commonly used
+ // http header to override a request method.
+ HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
+ // HTTPMethodOverrideFormKey is a commonly used
+ // HTML form key to override a request method.
+ HTTPMethodOverrideFormKey = "_method"
+)
+
+// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for
+// the X-HTTP-Method-Override header or the _method form key, and overrides (if
+// valid) request.Method with its value.
+//
+// This is especially useful for HTTP clients that don't support many http verbs.
+// It isn't secure to override e.g a GET to a POST, so only POST requests are
+// considered. Likewise, the override method can only be a "write" method: PUT,
+// PATCH or DELETE.
+//
+// Form method takes precedence over header method.
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "POST" {
+ om := r.FormValue(HTTPMethodOverrideFormKey)
+ if om == "" {
+ om = r.Header.Get(HTTPMethodOverrideHeader)
+ }
+ if om == "PUT" || om == "PATCH" || om == "DELETE" {
+ r.Method = om
+ }
+ }
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/handlers/handlers_go18.go b/vendor/github.com/gorilla/handlers/handlers_go18.go
new file mode 100644
index 0000000..35eb8d4
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/handlers_go18.go
@@ -0,0 +1,21 @@
+// +build go1.8
+
+package handlers
+
+import (
+ "fmt"
+ "net/http"
+)
+
+type loggingResponseWriter interface {
+ commonLoggingResponseWriter
+ http.Pusher
+}
+
+func (l *responseLogger) Push(target string, opts *http.PushOptions) error {
+ p, ok := l.w.(http.Pusher)
+ if !ok {
+ return fmt.Errorf("responseLogger does not implement http.Pusher")
+ }
+ return p.Push(target, opts)
+}
diff --git a/vendor/github.com/gorilla/handlers/handlers_pre18.go b/vendor/github.com/gorilla/handlers/handlers_pre18.go
new file mode 100644
index 0000000..197836a
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/handlers_pre18.go
@@ -0,0 +1,7 @@
+// +build !go1.8
+
+package handlers
+
+type loggingResponseWriter interface {
+ commonLoggingResponseWriter
+}
diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go
new file mode 100644
index 0000000..cbd182f
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/logging.go
@@ -0,0 +1,252 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+ "unicode/utf8"
+)
+
+// Logging
+
+// LogFormatterParams is the structure any formatter will be handed when the time to log comes.
+type LogFormatterParams struct {
+ Request *http.Request
+ URL url.URL
+ TimeStamp time.Time
+ StatusCode int
+ Size int
+}
+
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler
+type LogFormatter func(writer io.Writer, params LogFormatterParams)
+
+// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
+// friends
+
+type loggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+ formatter LogFormatter
+}
+
+func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ logger := makeLogger(w)
+ url := *req.URL
+
+ h.handler.ServeHTTP(logger, req)
+
+ params := LogFormatterParams{
+ Request: req,
+ URL: url,
+ TimeStamp: t,
+ StatusCode: logger.Status(),
+ Size: logger.Size(),
+ }
+
+ h.formatter(h.writer, params)
+}
+
+func makeLogger(w http.ResponseWriter) loggingResponseWriter {
+ var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}
+ if _, ok := w.(http.Hijacker); ok {
+ logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}
+ }
+ h, ok1 := logger.(http.Hijacker)
+ c, ok2 := w.(http.CloseNotifier)
+ if ok1 && ok2 {
+ return hijackCloseNotifier{logger, h, c}
+ }
+ if ok2 {
+ return &closeNotifyWriter{logger, c}
+ }
+ return logger
+}
+
+type commonLoggingResponseWriter interface {
+ http.ResponseWriter
+ http.Flusher
+ Status() int
+ Size() int
+}
+
+const lowerhex = "0123456789abcdef"
+
+func appendQuoted(buf []byte, s string) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ for width := 0; len(s) > 0; s = s[width:] {
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if r == rune('"') || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ continue
+ }
+ if strconv.IsPrint(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ continue
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ':
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ return buf
+
+}
+
+// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
+ username := "-"
+ if url.User != nil {
+ if name := url.User.Username(); name != "" {
+ username = name
+ }
+ }
+
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+
+ if err != nil {
+ host = req.RemoteAddr
+ }
+
+ uri := req.RequestURI
+
+ // Requests using the CONNECT method over HTTP/2.0 must use
+ // the authority field (aka r.Host) to identify the target.
+ // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
+ if req.ProtoMajor == 2 && req.Method == "CONNECT" {
+ uri = req.Host
+ }
+ if uri == "" {
+ uri = url.RequestURI()
+ }
+
+ buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
+ buf = append(buf, host...)
+ buf = append(buf, " - "...)
+ buf = append(buf, username...)
+ buf = append(buf, " ["...)
+ buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
+ buf = append(buf, `] "`...)
+ buf = append(buf, req.Method...)
+ buf = append(buf, " "...)
+ buf = appendQuoted(buf, uri)
+ buf = append(buf, " "...)
+ buf = append(buf, req.Proto...)
+ buf = append(buf, `" `...)
+ buf = append(buf, strconv.Itoa(status)...)
+ buf = append(buf, " "...)
+ buf = append(buf, strconv.Itoa(size)...)
+ return buf
+}
+
+// writeLog writes a log entry for req to w in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, '\n')
+ writer.Write(buf)
+}
+
+// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, ` "`...)
+ buf = appendQuoted(buf, params.Request.Referer())
+ buf = append(buf, `" "`...)
+ buf = appendQuoted(buf, params.Request.UserAgent())
+ buf = append(buf, '"', '\n')
+ writer.Write(buf)
+}
+
+// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Combined Log Format.
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeCombinedLog}
+}
+
+// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Common Log Format (CLF).
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("This is a catch-all route"))
+// })
+// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
+// http.ListenAndServe(":1123", loggedRouter)
+//
+func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeLog}
+}
+
+// CustomLoggingHandler provides a way to supply a custom log formatter
+// while taking advantage of the mechanisms in this package
+func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
+ return loggingHandler{out, h, f}
+}
diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go
new file mode 100644
index 0000000..0be750f
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/proxy_headers.go
@@ -0,0 +1,120 @@
+package handlers
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+var (
+ // De-facto standard header keys.
+ xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+ xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
+ xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
+ xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
+ xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+)
+
+var (
+ // RFC7239 defines a new "Forwarded: " header designed to replace the
+ // existing use of X-Forwarded-* headers.
+ // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
+ forwarded = http.CanonicalHeaderKey("Forwarded")
+ // Allows for a sub-match of the first value after 'for=' to the next
+ // comma, semi-colon or space. The match is case-insensitive.
+ forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
+ // Allows for a sub-match for the first instance of scheme (http|https)
+ // prefixed by 'proto='. The match is case-insensitive.
+ protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`)
+)
+
+// ProxyHeaders inspects common reverse proxy headers and sets the corresponding
+// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP
+// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme
+// for the scheme (http|https) and the RFC7239 Forwarded header, which may
+// include both client IPs and schemes.
+//
+// NOTE: This middleware should only be used when behind a reverse
+// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are
+// configured not to) strip these headers from client requests, or where these
+// headers are accepted "as is" from a remote client (e.g. when Go is not behind
+// a proxy), can manifest as a vulnerability if your application uses these
+// headers for validating the 'trustworthiness' of a request.
+func ProxyHeaders(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ // Set the remote IP with the value passed from the proxy.
+ if fwd := getIP(r); fwd != "" {
+ r.RemoteAddr = fwd
+ }
+
+ // Set the scheme (proto) with the value passed from the proxy.
+ if scheme := getScheme(r); scheme != "" {
+ r.URL.Scheme = scheme
+ }
+ // Set the host with the value passed by the proxy
+ if r.Header.Get(xForwardedHost) != "" {
+ r.Host = r.Header.Get(xForwardedHost)
+ }
+ // Call the next handler in the chain.
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
+// Forwarded headers (in that order).
+func getIP(r *http.Request) string {
+ var addr string
+
+ if fwd := r.Header.Get(xForwardedFor); fwd != "" {
+ // Only grab the first (client) address. Note that '192.168.0.1,
+ // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
+ // the first may represent forwarding proxies earlier in the chain.
+ s := strings.Index(fwd, ", ")
+ if s == -1 {
+ s = len(fwd)
+ }
+ addr = fwd[:s]
+ } else if fwd := r.Header.Get(xRealIP); fwd != "" {
+ // X-Real-IP should only contain one IP address (the client making the
+ // request).
+ addr = fwd
+ } else if fwd := r.Header.Get(forwarded); fwd != "" {
+ // match should contain at least two elements if the protocol was
+ // specified in the Forwarded header. The first element will always be
+ // the 'for=' capture, which we ignore. In the case of multiple IP
+ // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
+ // extract the first, which should be the client IP.
+ if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
+ // IPv6 addresses in Forwarded headers are quoted-strings. We strip
+ // these quotes.
+ addr = strings.Trim(match[1], `"`)
+ }
+ }
+
+ return addr
+}
+
+// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
+// Forwarded headers (in that order).
+func getScheme(r *http.Request) string {
+ var scheme string
+
+ // Retrieve the scheme from X-Forwarded-Proto.
+ if proto := r.Header.Get(xForwardedProto); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(xForwardedScheme); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(forwarded); proto != "" {
+ // match should contain at least two elements if the protocol was
+ // specified in the Forwarded header. The first element will always be
+ // the 'proto=' capture, which we ignore. In the case of multiple proto
+ // parameters (invalid) we only extract the first.
+ if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 {
+ scheme = strings.ToLower(match[1])
+ }
+ }
+
+ return scheme
+}
diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go
new file mode 100644
index 0000000..b1be9dc
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/recovery.go
@@ -0,0 +1,91 @@
+package handlers
+
+import (
+ "log"
+ "net/http"
+ "runtime/debug"
+)
+
+// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
+type RecoveryHandlerLogger interface {
+ Println(...interface{})
+}
+
+type recoveryHandler struct {
+ handler http.Handler
+ logger RecoveryHandlerLogger
+ printStack bool
+}
+
+// RecoveryOption provides a functional approach to define
+// configuration for a handler, such as setting the logger and
+// whether or not to print stack traces on panic.
+type RecoveryOption func(http.Handler)
+
+func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
+ for _, option := range opts {
+ option(h)
+ }
+
+ return h
+}
+
+// RecoveryHandler is HTTP middleware that recovers from a panic,
+// logs the panic, writes http.StatusInternalServerError, and
+// continues to the next handler.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// panic("Unexpected error!")
+// })
+//
+// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
+func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ r := &recoveryHandler{handler: h}
+ return parseRecoveryOptions(r, opts...)
+ }
+}
+
+// RecoveryLogger is a functional option to override
+// the default logger
+func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler)
+ r.logger = logger
+ }
+}
+
+// PrintRecoveryStack is a functional option to enable
+// or disable printing stack traces on panic.
+func PrintRecoveryStack(print bool) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler)
+ r.printStack = print
+ }
+}
+
+func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ h.log(err)
+ }
+ }()
+
+ h.handler.ServeHTTP(w, req)
+}
+
+func (h recoveryHandler) log(v ...interface{}) {
+ if h.logger != nil {
+ h.logger.Println(v...)
+ } else {
+ log.Println(v...)
+ }
+
+ if h.printStack {
+ debug.PrintStack()
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml
new file mode 100644
index 0000000..ad0935d
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.5.x
+ - go: 1.6.x
+ - go: 1.7.x
+ - go: 1.8.x
+ - go: 1.9.x
+ - go: 1.10.x
+ - go: tip
+ allow_failures:
+ - go: tip
+
+install:
+ - # Skip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..232be82
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+**What version of Go are you running?** (Paste the output of `go version`)
+
+
+**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
+
+
+**Describe your problem** (and what you have tried so far)
+
+
+**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
+
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..e424397
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,649 @@
+# gorilla/mux
+
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+
+
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts, paths and query values can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
+* [Testing Handlers](#testing-handlers)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+```
+
+Routes are tested in the order they were added to the router. If two routes match, the first one wins:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/specific", specificHandler)
+r.PathPrefix("/").Handler(catchAllHandler)
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/\*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host and query value variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42",
+ "filter", "gorilla")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+```
+
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ return
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.HandleFunc("/products", handler).Methods("POST")
+ r.HandleFunc("/articles", handler).Methods("GET")
+ r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+ r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+ err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ pathTemplate, err := route.GetPathTemplate()
+ if err == nil {
+ fmt.Println("ROUTE:", pathTemplate)
+ }
+ pathRegexp, err := route.GetPathRegexp()
+ if err == nil {
+ fmt.Println("Path regexp:", pathRegexp)
+ }
+ queriesTemplates, err := route.GetQueriesTemplates()
+ if err == nil {
+ fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+ }
+ queriesRegexps, err := route.GetQueriesRegexp()
+ if err == nil {
+ fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+ }
+ methods, err := route.GetMethods()
+ if err == nil {
+ fmt.Println("Methods:", strings.Join(methods, ","))
+ }
+ fmt.Println()
+ return nil
+ })
+
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ var wait time.Duration
+ flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
+ flag.Parse()
+
+ r := mux.NewRouter()
+ // Add your routes as needed
+
+ srv := &http.Server{
+ Addr: "0.0.0.0:8080",
+ // Good practice to set timeouts to avoid Slowloris attacks.
+ WriteTimeout: time.Second * 15,
+ ReadTimeout: time.Second * 15,
+ IdleTimeout: time.Second * 60,
+ Handler: r, // Pass our instance of gorilla/mux in.
+ }
+
+ // Run our server in a goroutine so that it doesn't block.
+ go func() {
+ if err := srv.ListenAndServe(); err != nil {
+ log.Println(err)
+ }
+ }()
+
+ c := make(chan os.Signal, 1)
+ // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
+ // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
+ signal.Notify(c, os.Interrupt)
+
+ // Block until we receive our signal.
+ <-c
+
+ // Create a deadline to wait for.
+ ctx, cancel := context.WithTimeout(context.Background(), wait)
+ defer cancel()
+ // Doesn't block if no connections, but will otherwise wait
+ // until the timeout deadline.
+ srv.Shutdown(ctx)
+ // Optionally, you could run srv.Shutdown in a goroutine and block on
+ // <-ctx.Done() if your application should wait for other services
+ // to finalize based on context cancellation.
+ log.Println("shutting down")
+ os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+ tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ // Pass down the request to the next middleware (or final handler)
+ next.ServeHTTP(w, r)
+ } else {
+ // Write an error and stop the handler chain
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+ // A very simple health check.
+ w.WriteHeader(http.StatusOK)
+ w.Header().Set("Content-Type", "application/json")
+
+ // In the future we could report back on the status of our DB, or our cache
+ // (e.g. Redis) by performing a simple PING, and include them in the response.
+ io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/health", HealthCheckHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+ // Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+ // pass 'nil' as the third parameter.
+ req, err := http.NewRequest("GET", "/health", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(HealthCheckHandler)
+
+ // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
+ // directly and pass in our Request and ResponseRecorder.
+ handler.ServeHTTP(rr, req)
+
+ // Check the status code is what we expect.
+ if status := rr.Code; status != http.StatusOK {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusOK)
+ }
+
+ // Check the response body is what we expect.
+ expected := `{"alive": true}`
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %v want %v",
+ rr.Body.String(), expected)
+ }
+}
+```
+
+In the case that our routes have [variables](#examples), we can pass those in the request. We could write
+[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
+possible route variables as needed.
+
+```go
+// endpoints.go
+func main() {
+ r := mux.NewRouter()
+ // A route with a route variable:
+ r.HandleFunc("/metrics/{type}", MetricsHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test file, with a table-driven test of `routeVariables`:
+
+```go
+// endpoints_test.go
+func TestMetricsHandler(t *testing.T) {
+ tt := []struct{
+ routeVariable string
+ shouldPass bool
+ }{
+ {"goroutines", true},
+ {"heap", true},
+ {"counters", true},
+ {"queries", true},
+ {"adhadaeqm3k", false},
+ }
+
+ for _, tc := range tt {
+ path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
+ req, err := http.NewRequest("GET", path, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rr := httptest.NewRecorder()
+
+ // Need to create a router that we can pass the request through so that the vars will be added to the context
+ router := mux.NewRouter()
+ router.HandleFunc("/metrics/{type}", MetricsHandler)
+ router.ServeHTTP(rr, req)
+
+ // In this case, our MetricsHandler returns a non-200 response
+ // for a route variable it doesn't know about.
+ if rr.Code == http.StatusOK && !tc.shouldPass {
+ t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
+ tc.routeVariable, rr.Code, http.StatusOK)
+ }
+ }
+}
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+ "net/http"
+ "log"
+ "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
+
+ // Bind to a port and pass our router in
+ log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+ "net/http"
+
+ "github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+ return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+ if val == nil {
+ return r
+ }
+
+ context.Set(r, key, val)
+ return r
+}
+
+func contextClear(r *http.Request) {
+ context.Clear(r)
+}
diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+ "context"
+ "net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+ return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+ if val == nil {
+ return r
+ }
+
+ return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+ return
+}
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..38957de
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,306 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts, paths and query values can have variables with an optional
+ regular expression.
+ * Registered URLs can be built, or "reversed", which helps maintaining
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+ r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register its
+paths relatively to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+ func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
+
+ // This will serve files under http://localhost:8000/static/
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+ }
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host and query value variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42",
+ "filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match both requests with a Content-Type of `application/json` as well as
+`application/text`
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+ type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+ func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+ }
+
+Middlewares can be added to a router using `Router.Use()`:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+ // Define our struct
+ type authenticationMiddleware struct {
+ tokenUsers map[string]string
+ }
+
+ // Initialize it somewhere
+ func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+ }
+
+ // Middleware function, which will be called for each request
+ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ next.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+ }
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+
+ amw := authenticationMiddleware{}
+ amw.Populate()
+
+ r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 0000000..ceb812c
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,72 @@
+package mux
+
+import (
+ "net/http"
+ "strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a MiddlewareFunc named Middleware.
+type middleware interface {
+ Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+ return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+ for _, fn := range mwf {
+ r.middlewares = append(r.middlewares, fn)
+ }
+}
+
+// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+ r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
+// on a request, by matching routes based only on paths. It also handles
+// OPTIONS requests, by setting Access-Control-Allow-Methods, and then
+// returning without calling the next http handler.
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ var allMethods []string
+
+ err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
+ for _, m := range route.matchers {
+ if _, ok := m.(*routeRegexp); ok {
+ if m.Match(req, &RouteMatch{}) {
+ methods, err := route.GetMethods()
+ if err != nil {
+ return err
+ }
+
+ allMethods = append(allMethods, methods...)
+ }
+ break
+ }
+ }
+ return nil
+ })
+
+ if err == nil {
+ w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
+
+ if req.Method == "OPTIONS" {
+ return
+ }
+ }
+
+ next.ServeHTTP(w, req)
+ })
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..4bbafa5
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,588 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "regexp"
+)
+
+var (
+ // ErrMethodMismatch is returned when the method in the request does not match
+ // the method defined against the route.
+ ErrMethodMismatch = errors.New("method is not allowed")
+ // ErrNotFound is returned when no route match is found.
+ ErrNotFound = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in a init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+
+ // Configurable Handler to be used when the request method does not match the route.
+ MethodNotAllowedHandler http.Handler
+
+ // Parent route, if this is a subrouter.
+ parent parentRoute
+ // Routes to be matched, in order.
+ routes []*Route
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+ // See Router.StrictSlash(). This defines the flag for new routes.
+ strictSlash bool
+ // See Router.SkipClean(). This defines the flag for new routes.
+ skipClean bool
+ // If true, do not clear the request context after handling the request.
+ // This has no effect when go1.7+ is used, since the context is stored
+ // on the request itself.
+ KeepContext bool
+ // see Router.UseEncodedPath(). This defines a flag for all routes.
+ useEncodedPath bool
+ // Slice of middlewares to be called after a match is found
+ middlewares []middleware
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ // Build middleware chain if no error was found
+ if match.MatchErr == nil {
+ for i := len(r.middlewares) - 1; i >= 0; i-- {
+ match.Handler = r.middlewares[i].Middleware(match.Handler)
+ }
+ }
+ return true
+ }
+ }
+
+ if match.MatchErr == ErrMethodMismatch {
+ if r.MethodNotAllowedHandler != nil {
+ match.Handler = r.MethodNotAllowedHandler
+ return true
+ }
+
+ return false
+ }
+
+ // Closest match for a router (includes sub-routers)
+ if r.NotFoundHandler != nil {
+ match.Handler = r.NotFoundHandler
+ match.MatchErr = ErrNotFound
+ return true
+ }
+
+ match.MatchErr = ErrNotFound
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if !r.skipClean {
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(path); p != path {
+
+ // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+ // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ req = setVars(req, match.Vars)
+ req = setCurrentRoute(req, match.Route)
+ }
+
+ if handler == nil && match.MatchErr == ErrMethodMismatch {
+ handler = methodNotAllowedHandler()
+ }
+
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+
+ if !r.KeepContext {
+ defer contextClear(req)
+ }
+
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+ r.skipClean = value
+ return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+ r.useEncodedPath = true
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+func (r *Router) getBuildScheme() string {
+ if r.parent != nil {
+ return r.parent.getBuildScheme()
+ }
+ return ""
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+ if r.namedRoutes == nil {
+ if r.parent != nil {
+ r.namedRoutes = r.parent.getNamedRoutes()
+ } else {
+ r.namedRoutes = make(map[string]*Route)
+ }
+ }
+ return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+ if r.parent != nil {
+ return r.parent.getRegexpGroup()
+ }
+ return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+ if r.parent != nil {
+ m = r.parent.buildVars(m)
+ }
+ return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+ return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+ return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+ for _, t := range r.routes {
+ err := walkFn(t, r, ancestors)
+ if err == SkipRouter {
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ for _, sr := range t.matchers {
+ if h, ok := sr.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ if h, ok := t.handler.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+
+ // MatchErr is set to appropriate matching error
+ // It is set to ErrMethodMismatch if there is a mismatch in
+ // the request method and route method
+ MatchErr error
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+ if rv := contextGet(r, varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := contextGet(r, routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+ return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+ return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+
+ return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return length, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]*regexp.Regexp, length/2)
+ for i := 0; i < length; i += 2 {
+ regex, err := regexp.Compile(pairs[i+1])
+ if err != nil {
+ return nil, err
+ }
+ m[pairs[i]] = regex
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != nil {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v.MatchString(value) {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..2b57e56
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,332 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type routeRegexpOptions struct {
+ strictSlash bool
+ useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+ regexpTypePath regexpType = 0
+ regexpTypeHost regexpType = 1
+ regexpTypePrefix regexpType = 2
+ regexpTypeQuery regexpType = 3
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if typ == regexpTypeQuery {
+ defaultPattern = ".*"
+ } else if typ == regexpTypeHost {
+ defaultPattern = "[^.]+"
+ }
+ // Only match strict slash if not matching
+ if typ != regexpTypePath {
+ options.strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if options.strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ pattern.WriteByte('^')
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+
+ // Append variable name and compiled pattern.
+ varsN[i/2] = name
+ varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if options.strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if typ == regexpTypeQuery {
+ // Add the default pattern if the query value is empty
+ if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+ pattern.WriteString(defaultPattern)
+ }
+ }
+ if typ != regexpTypePrefix {
+ pattern.WriteByte('$')
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+
+ // Check for capturing groups which used to work in older versions
+ if reg.NumSubexp() != len(idxs)/2 {
+ panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+ "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+ }
+
+ // Done!
+ return &routeRegexp{
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // The type of match
+ regexpType regexpType
+ // Options for matching
+ options routeRegexpOptions
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if r.regexpType != regexpTypeHost {
+ if r.regexpType == regexpTypeQuery {
+ return r.matchQueryString(req)
+ }
+ path := req.URL.Path
+ if r.options.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ return r.regexp.MatchString(path)
+ }
+
+ return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+ urlValues := make([]interface{}, len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ if r.regexpType == regexpTypeQuery {
+ value = url.QueryEscape(value)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+ if r.regexpType != regexpTypeQuery {
+ return ""
+ }
+ templateKey := strings.SplitN(r.template, "=", 2)[0]
+ for key, vals := range req.URL.Query() {
+ if key == templateKey && len(vals) > 0 {
+ return key + "=" + vals[0]
+ }
+ }
+ return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+ return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ var idxs []int
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+ return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ host := getHost(req)
+ matches := v.host.regexp.FindStringSubmatchIndex(host)
+ if len(matches) > 0 {
+ extractVars(host, matches, v.host.varsN, m.Vars)
+ }
+ }
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Store path variables.
+ if v.path != nil {
+ matches := v.path.regexp.FindStringSubmatchIndex(path)
+ if len(matches) > 0 {
+ extractVars(path, matches, v.path.varsN, m.Vars)
+ // Check if we should redirect.
+ if v.path.options.strictSlash {
+ p1 := strings.HasSuffix(path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), 301)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ for _, q := range v.queries {
+ queryURL := q.getURLQuery(req)
+ matches := q.regexp.FindStringSubmatchIndex(queryURL)
+ if len(matches) > 0 {
+ extractVars(queryURL, matches, q.varsN, m.Vars)
+ }
+ }
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ host := r.Host
+ // Slice off any port information.
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ return host
+
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+ for i, name := range names {
+ output[name] = input[matches[2*i+2]:matches[2*i+3]]
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..a591d73
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,763 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect
+	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+	// The scheme used when building URLs.
+	buildScheme string
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulted from building a route.
+	err error
+
+	// Optional hook that can rewrite route variables before a URL is built.
+	buildVarsFunc BuildVarsFunc
+}
+
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+
+	var matchErr error
+
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			if _, ok := m.(methodMatcher); ok {
+				// A method mismatch alone does not abort matching: keep
+				// checking the remaining matchers so the router can
+				// distinguish 405 (method not allowed) from 404.
+				matchErr = ErrMethodMismatch
+				continue
+			}
+			matchErr = nil
+			return false
+		}
+	}
+
+	if matchErr != nil {
+		match.MatchErr = matchErr
+		return false
+	}
+
+	if match.MatchErr == ErrMethodMismatch {
+		// We found a route which matches request method, clear MatchErr
+		match.MatchErr = nil
+		// Then override the mis-matched handler
+		match.Handler = r.handler
+	}
+
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+
+	// Set variables.
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+// It is a no-op if the route already carries a build error.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+// Note that renaming a route that already has a name records a build error
+// instead of overwriting the name.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+// It is a no-op if the route already carries a build error.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+// It registers the compiled routeRegexp in the route's regexp group and as a
+// matcher, enforcing that variable names are unique across host, path and
+// query matchers.
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if typ == regexpTypePath || typ == regexpTypePrefix {
+		if len(tpl) > 0 && tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			// Subrouter case: prepend the parent's path template so the
+			// full pattern is matched.
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+		strictSlash:    r.strictSlash,
+		useEncodedPath: r.useEncodedPath,
+	})
+	if err != nil {
+		return err
+	}
+	// New variables must not collide with existing query variables.
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if typ == regexpTypeHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if typ == regexpTypeQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+// Match reports whether every configured header key/value is present in the
+// request headers (an empty value matches any value for that key).
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+// Match reports whether every configured header regexp matches the request.
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both the request header matches both regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT". Methods are normalized to upper case in place.
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		// NOTE(review): returning nil (rather than r) makes a chained call
+		// after a malformed Queries() panic — confirm this upstream behavior
+		// is intended before relying on chaining here.
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+// The first scheme given also becomes the default scheme used when building
+// URLs for this route (see Route.URL).
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	if r.buildScheme == "" && len(schemes) > 0 {
+		r.buildScheme = schemes[0]
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	// The subrouter inherits strictSlash from this route.
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	queries := make([]string, 0, len(r.regexp.queries))
+	if r.regexp.host != nil {
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+		// The scheme defaults to "http" unless one was registered via
+		// Schemes() on this route or an ancestor.
+		scheme = "http"
+		if s := r.getBuildScheme(); s != "" {
+			scheme = s
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	for _, q := range r.regexp.queries {
+		var query string
+		if query, err = q.url(values); err != nil {
+			return nil, err
+		}
+		queries = append(queries, query)
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+		// Query pairs are joined as-is; values were escaped when built.
+		RawQuery: strings.Join(queries, "&"),
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	u := &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}
+	if s := r.getBuildScheme(); s != "" {
+		u.Scheme = s
+	}
+	return u, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route does not have a path")
+	}
+	return r.regexp.path.regexp.String(), nil
+}
+
+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+// The returned slice preserves the order queries were registered in.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	var queries []string
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.regexp.String())
+	}
+	return queries, nil
+}
+
+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+// The returned slice preserves the order queries were registered in.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	var queries []string
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.template)
+	}
+	return queries, nil
+}
+
+// GetMethods returns the methods the route matches against
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if route does not have methods.
+// Only the first methodMatcher registered on the route is reported.
+func (r *Route) GetMethods() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	for _, m := range r.matchers {
+		if methods, ok := m.(methodMatcher); ok {
+			return []string(methods), nil
+		}
+	}
+	return nil, errors.New("mux: route doesn't have methods")
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+// buildVars applies the parent chain's variable hooks first (outermost
+// router first), then this route's own buildVarsFunc.
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+	getBuildScheme() string
+	getNamedRoutes() map[string]*Route
+	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
+}
+
+// getBuildScheme returns this route's build scheme, falling back to the
+// nearest ancestor that has one set.
+func (r *Route) getBuildScheme() string {
+	if r.buildScheme != "" {
+		return r.buildScheme
+	}
+	if r.parent != nil {
+		return r.parent.getBuildScheme()
+	}
+	return ""
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+	if r.parent == nil {
+		// During tests router is not always set.
+		r.parent = NewRouter()
+	}
+	return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route, lazily
+// initializing them from the parent. The parent's group is shallow-copied so
+// matchers added to this route do not mutate the parent's group.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 0000000..32ecffd
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified, a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+	// setVars stores val in the request context and returns the derived request.
+	return setVars(r, val)
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 0000000..531fcc1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 0000000..1f98077
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+
+install: go get -v -t ./...
+script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000..b03310a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 0000000..a828d28
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 0000000..187ef67
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 0000000..9cfa988
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+// It compiles the expression on every call; prefer Compile/MustCompile when
+// the same expression is evaluated repeatedly.
+func Search(expression string, data interface{}) (interface{}, error) {
+	intr := newInterpreter()
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	return intr.Execute(ast, data)
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 0000000..1cd2d23
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 0000000..9b7cd89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
// jpFunction is the Go implementation backing a JMESPath built-in function.
type jpFunction func(arguments []interface{}) (interface{}, error)

// jpType names the JMESPath-level types used for argument type checking.
type jpType string

const (
	jpUnknown     jpType = "unknown"
	jpNumber      jpType = "number"
	jpString      jpType = "string"
	jpArray       jpType = "array"
	jpObject      jpType = "object"
	jpArrayNumber jpType = "array[number]"
	jpArrayString jpType = "array[string]"
	jpExpref      jpType = "expref"
	jpAny         jpType = "any"
)

// functionEntry describes one built-in function: its argument specs, the
// handler, and whether the interpreter must be prepended to the arguments
// (hasExpRef, for functions taking expression references like sort_by).
type functionEntry struct {
	name      string
	arguments []argSpec
	handler   jpFunction
	hasExpRef bool
}

// argSpec lists the acceptable JMESPath types for one argument slot;
// variadic marks the final, repeatable argument of a function.
type argSpec struct {
	types    []jpType
	variadic bool
}
+
// byExprString is a sort.Interface adapter that orders items by the string
// produced by evaluating node against each item. sort.Interface methods
// cannot return errors, so evaluation or type failures are latched in
// hasError for the caller (sort_by) to inspect after sorting.
type byExprString struct {
	intr     *treeInterpreter
	node     ASTNode
	items    []interface{}
	hasError bool
}

func (a *byExprString) Len() int {
	return len(a.items)
}
func (a *byExprString) Swap(i, j int) {
	a.items[i], a.items[j] = a.items[j], a.items[i]
}
func (a *byExprString) Less(i, j int) bool {
	first, err := a.intr.Execute(a.node, a.items[i])
	if err != nil {
		a.hasError = true
		// Return a dummy value.
		return true
	}
	ith, ok := first.(string)
	if !ok {
		// Non-string key: latch the error and return a dummy ordering.
		a.hasError = true
		return true
	}
	second, err := a.intr.Execute(a.node, a.items[j])
	if err != nil {
		a.hasError = true
		// Return a dummy value.
		return true
	}
	jth, ok := second.(string)
	if !ok {
		a.hasError = true
		return true
	}
	return ith < jth
}

// byExprFloat is the numeric counterpart of byExprString: it orders items
// by the float64 produced by evaluating node, latching failures in hasError.
type byExprFloat struct {
	intr     *treeInterpreter
	node     ASTNode
	items    []interface{}
	hasError bool
}

func (a *byExprFloat) Len() int {
	return len(a.items)
}
func (a *byExprFloat) Swap(i, j int) {
	a.items[i], a.items[j] = a.items[j], a.items[i]
}
func (a *byExprFloat) Less(i, j int) bool {
	first, err := a.intr.Execute(a.node, a.items[i])
	if err != nil {
		a.hasError = true
		// Return a dummy value.
		return true
	}
	ith, ok := first.(float64)
	if !ok {
		a.hasError = true
		return true
	}
	second, err := a.intr.Execute(a.node, a.items[j])
	if err != nil {
		a.hasError = true
		// Return a dummy value.
		return true
	}
	jth, ok := second.(float64)
	if !ok {
		a.hasError = true
		return true
	}
	return ith < jth
}
+
// functionCaller dispatches JMESPath built-in function calls via a table
// keyed by function name.
type functionCaller struct {
	functionTable map[string]functionEntry
}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
// resolveArgs validates the caller-supplied arguments against the entry's
// argSpec and returns them unchanged on success.
func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
	if len(e.arguments) == 0 {
		return arguments, nil
	}
	if !e.arguments[len(e.arguments)-1].variadic {
		// Fixed arity: count must match exactly and each argument must
		// pass its slot's type check.
		if len(e.arguments) != len(arguments) {
			return nil, errors.New("incorrect number of args")
		}
		for i, spec := range e.arguments {
			userArg := arguments[i]
			err := spec.typeCheck(userArg)
			if err != nil {
				return nil, err
			}
		}
		return arguments, nil
	}
	if len(arguments) < len(e.arguments) {
		return nil, errors.New("Invalid arity.")
	}
	// NOTE(review): for variadic functions only the minimum arity is
	// checked here; the individual arguments are never run through
	// typeCheck — confirm whether the handlers rely on that.
	return arguments, nil
}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
+
// jpfAbs implements abs(number); the argument is already type checked.
func jpfAbs(arguments []interface{}) (interface{}, error) {
	return math.Abs(arguments[0].(float64)), nil
}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
// jpfStartsWith implements starts_with(subject, prefix); both arguments
// are type-checked strings.
func jpfStartsWith(arguments []interface{}) (interface{}, error) {
	subject, prefix := arguments[0].(string), arguments[1].(string)
	return strings.HasPrefix(subject, prefix), nil
}
+
// jpfAvg implements avg(array[number]). Per the JMESPath specification an
// empty array yields null; previously this returned 0.0/0.0 == NaN.
func jpfAvg(arguments []interface{}) (interface{}, error) {
	// We've already type checked the value so we can safely use
	// type assertions.
	args := arguments[0].([]interface{})
	if len(args) == 0 {
		return nil, nil
	}
	numerator := 0.0
	for _, n := range args {
		numerator += n.(float64)
	}
	return numerator / float64(len(args)), nil
}
// jpfCeil implements ceil(number): round up to the nearest integer value.
func jpfCeil(arguments []interface{}) (interface{}, error) {
	return math.Ceil(arguments[0].(float64)), nil
}
// jpfContains implements contains(subject, search). For string subjects it
// is a substring test; for arrays it is an equality membership test.
func jpfContains(arguments []interface{}) (interface{}, error) {
	search := arguments[0]
	el := arguments[1]
	if searchStr, ok := search.(string); ok {
		if elStr, ok := el.(string); ok {
			// Idiomatic substring test (was strings.Index(...) != -1).
			return strings.Contains(searchStr, elStr), nil
		}
		// A non-string needle can never occur within a string subject.
		return false, nil
	}
	// Otherwise this is a generic contains for []interface{}.
	// NOTE(review): == panics for uncomparable elements (nested
	// maps/slices) — confirm inputs are always scalar here.
	general := search.([]interface{})
	for _, item := range general {
		if item == el {
			return true, nil
		}
	}
	return false, nil
}
// jpfEndsWith implements ends_with(subject, suffix); both arguments are
// type-checked strings.
func jpfEndsWith(arguments []interface{}) (interface{}, error) {
	subject, suffix := arguments[0].(string), arguments[1].(string)
	return strings.HasSuffix(subject, suffix), nil
}
// jpfFloor implements floor(number): round down to the nearest integer value.
func jpfFloor(arguments []interface{}) (interface{}, error) {
	return math.Floor(arguments[0].(float64)), nil
}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
// jpfMerge implements merge(obj, ...): keys from later objects override
// the same keys from earlier ones.
func jpfMerge(arguments []interface{}) (interface{}, error) {
	merged := map[string]interface{}{}
	for _, arg := range arguments {
		for k, v := range arg.(map[string]interface{}) {
			merged[k] = v
		}
	}
	return merged, nil
}
// jpfMaxBy implements max_by(array, &expr): it returns the element whose
// expression value is largest. arguments[0] is the interpreter, prepended
// because this function takes an expression reference.
func jpfMaxBy(arguments []interface{}) (interface{}, error) {
	intr := arguments[0].(*treeInterpreter)
	arr := arguments[1].([]interface{})
	exp := arguments[2].(expRef)
	node := exp.ref
	if len(arr) == 0 {
		return nil, nil
	} else if len(arr) == 1 {
		return arr[0], nil
	}
	// The type of the first element's key decides whether this is a
	// numeric or a string comparison; mixed key types are an error.
	start, err := intr.Execute(node, arr[0])
	if err != nil {
		return nil, err
	}
	switch t := start.(type) {
	case float64:
		bestVal := t
		bestItem := arr[0]
		for _, item := range arr[1:] {
			result, err := intr.Execute(node, item)
			if err != nil {
				return nil, err
			}
			current, ok := result.(float64)
			if !ok {
				return nil, errors.New("invalid type, must be number")
			}
			if current > bestVal {
				bestVal = current
				bestItem = item
			}
		}
		return bestItem, nil
	case string:
		bestVal := t
		bestItem := arr[0]
		for _, item := range arr[1:] {
			result, err := intr.Execute(node, item)
			if err != nil {
				return nil, err
			}
			current, ok := result.(string)
			if !ok {
				return nil, errors.New("invalid type, must be string")
			}
			if current > bestVal {
				bestVal = current
				bestItem = item
			}
		}
		return bestItem, nil
	default:
		return nil, errors.New("invalid type, must be number of string")
	}
}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
// jpfType implements type(): it names the JMESPath type of its argument.
func jpfType(arguments []interface{}) (interface{}, error) {
	switch arguments[0].(type) {
	case float64:
		return "number", nil
	case string:
		return "string", nil
	case []interface{}:
		return "array", nil
	case map[string]interface{}:
		return "object", nil
	case nil:
		return "null", nil
	case bool:
		return "boolean", nil
	}
	return nil, errors.New("unknown type")
}
// jpfKeys implements keys(object); ordering follows Go map iteration and
// is therefore unspecified.
func jpfKeys(arguments []interface{}) (interface{}, error) {
	obj := arguments[0].(map[string]interface{})
	keys := make([]interface{}, 0, len(obj))
	for k := range obj {
		keys = append(keys, k)
	}
	return keys, nil
}
// jpfValues implements values(object); ordering follows Go map iteration
// and is therefore unspecified.
func jpfValues(arguments []interface{}) (interface{}, error) {
	obj := arguments[0].(map[string]interface{})
	vals := make([]interface{}, 0, len(obj))
	for _, v := range obj {
		vals = append(vals, v)
	}
	return vals, nil
}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
// jpfSortBy implements sort_by(array, &expr): a stable sort keyed by the
// expression value of each element. The first element's key type selects
// numeric vs string comparison. Because sort.Interface cannot surface
// errors, the byExpr* adapters latch failures in hasError, checked after
// sorting. arguments[0] is the interpreter (expression-reference function).
func jpfSortBy(arguments []interface{}) (interface{}, error) {
	intr := arguments[0].(*treeInterpreter)
	arr := arguments[1].([]interface{})
	exp := arguments[2].(expRef)
	node := exp.ref
	if len(arr) == 0 {
		return arr, nil
	} else if len(arr) == 1 {
		return arr, nil
	}
	start, err := intr.Execute(node, arr[0])
	if err != nil {
		return nil, err
	}
	if _, ok := start.(float64); ok {
		sortable := &byExprFloat{intr, node, arr, false}
		sort.Stable(sortable)
		if sortable.hasError {
			return nil, errors.New("error in sort_by comparison")
		}
		// The input slice is sorted in place and returned.
		return arr, nil
	} else if _, ok := start.(string); ok {
		sortable := &byExprString{intr, node, arr, false}
		sort.Stable(sortable)
		if sortable.hasError {
			return nil, errors.New("error in sort_by comparison")
		}
		return arr, nil
	} else {
		return nil, errors.New("invalid type, must be number of string")
	}
}
// jpfJoin implements join(glue, array[string]).
func jpfJoin(arguments []interface{}) (interface{}, error) {
	sep := arguments[0].(string)
	items := arguments[1].([]interface{})
	// Each element is boxed as interface{}, so we can't assert to []string
	// directly; convert one by one. Pre-size to avoid repeated growth.
	arrayStr := make([]string, 0, len(items))
	for _, item := range items {
		arrayStr = append(arrayStr, item.(string))
	}
	return strings.Join(arrayStr, sep), nil
}
// jpfReverse implements reverse(): strings are reversed rune-by-rune,
// arrays element-by-element (into a fresh slice).
func jpfReverse(arguments []interface{}) (interface{}, error) {
	if s, ok := arguments[0].(string); ok {
		runes := []rune(s)
		for l, r := 0, len(runes)-1; l < r; l, r = l+1, r-1 {
			runes[l], runes[r] = runes[r], runes[l]
		}
		return string(runes), nil
	}
	src := arguments[0].([]interface{})
	reversed := make([]interface{}, len(src))
	for i, v := range src {
		reversed[len(src)-1-i] = v
	}
	return reversed, nil
}
// jpfToArray implements to_array(): arrays pass through unchanged and any
// other value is wrapped in a single-element array.
func jpfToArray(arguments []interface{}) (interface{}, error) {
	if _, ok := arguments[0].([]interface{}); ok {
		return arguments[0], nil
	}
	// arguments[:1:1] is a one-element view of the argument slice whose
	// capacity is pinned to 1, so a later append by the caller cannot
	// overwrite arguments[1:].
	return arguments[:1:1], nil
}
// jpfToString implements to_string(): strings pass through untouched,
// everything else is JSON-encoded.
func jpfToString(arguments []interface{}) (interface{}, error) {
	if s, ok := arguments[0].(string); ok {
		return s, nil
	}
	encoded, err := json.Marshal(arguments[0])
	if err != nil {
		return nil, err
	}
	return string(encoded), nil
}
// jpfToNumber implements to_number(): numbers pass through, parseable
// strings are converted, and everything else (including unparseable
// strings) converts to null rather than an error.
func jpfToNumber(arguments []interface{}) (interface{}, error) {
	switch v := arguments[0].(type) {
	case float64:
		return v, nil
	case string:
		parsed, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return nil, nil
		}
		return parsed, nil
	case []interface{}, map[string]interface{}, bool, nil:
		return nil, nil
	}
	return nil, errors.New("unknown type")
}
// jpfNotNull implements not_null(...): the first non-null argument is
// returned, or null when every argument is null.
func jpfNotNull(arguments []interface{}) (interface{}, error) {
	for _, candidate := range arguments {
		if candidate != nil {
			return candidate, nil
		}
	}
	return nil, nil
}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 0000000..13c7460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
// fieldFromStruct resolves a JMESPath field access against a Go struct
// (or pointer to struct) via reflection. The key's first rune is
// upper-cased so a lowercase expression key like "foo" can match the
// exported field "Foo". Missing fields and non-struct values yield null
// rather than an error.
func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
	rv := reflect.ValueOf(value)
	first, n := utf8.DecodeRuneInString(key)
	fieldName := string(unicode.ToUpper(first)) + key[n:]
	if rv.Kind() == reflect.Struct {
		v := rv.FieldByName(fieldName)
		if !v.IsValid() {
			return nil, nil
		}
		return v.Interface(), nil
	} else if rv.Kind() == reflect.Ptr {
		// Handle multiple levels of indirection?
		if rv.IsNil() {
			return nil, nil
		}
		// Only a single level of pointer indirection is dereferenced.
		rv = rv.Elem()
		v := rv.FieldByName(fieldName)
		if !v.IsValid() {
			return nil, nil
		}
		return v.Interface(), nil
	}
	return nil, nil
}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
// sliceWithReflection applies a slice expression (start:stop:step) to a
// user-provided slice type: the elements are first copied into a
// []interface{}, then the shared slice() helper performs the slicing.
func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
	v := reflect.ValueOf(value)
	parts := node.value.([]*int)
	sliceParams := make([]sliceParam, 3)
	for i, part := range parts {
		if part != nil {
			sliceParams[i].Specified = true
			sliceParams[i].N = *part
		}
	}
	final := []interface{}{}
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		final = append(final, element)
	}
	return slice(final, sliceParams)
}
+
// filterProjectionWithReflection mirrors the []interface{} filter-projection
// path in Execute for user-provided slice types: elements whose filter
// expression (children[2]) is truthy are mapped through children[1], and
// null results are dropped.
func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
	compareNode := node.children[2]
	collected := []interface{}{}
	v := reflect.ValueOf(value)
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		result, err := intr.Execute(compareNode, element)
		if err != nil {
			return nil, err
		}
		if !isFalse(result) {
			current, err := intr.Execute(node.children[1], element)
			if err != nil {
				return nil, err
			}
			if current != nil {
				collected = append(collected, current)
			}
		}
	}
	return collected, nil
}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 0000000..817900c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
// token is a single lexical element of a JMESPath expression.
type token struct {
	tokenType tokType // kind of token (tDot, tStar, ...)
	value     string  // literal text (or decoded value for identifiers/literals)
	position  int     // byte offset of the token within the expression
	length    int     // length of the token in bytes
}

// tokType enumerates the kinds of tokens produced by the lexer.
type tokType int

// eof is the sentinel rune returned by next() at end of input.
const eof = -1

// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The width (in bytes) of the rune most recently read by next().
	buf        bytes.Buffer // Internal buffer used for building up values.
}
+
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}

// Error implements the error interface, prefixing the underlying
// message with "SyntaxError: ".
func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific
	// location where the error occurred.
	return fmt.Sprintf("SyntaxError: %s", e.msg)
}

// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
	padding := strings.Repeat(" ", e.Offset)
	return e.Expression + "\n" + padding + "^"
}
+
//go:generate stringer -type=tokType
// NOTE: the order of these constants must stay in sync with the
// generated toktype_string.go, which indexes names by value.
const (
	tUnknown tokType = iota
	tStar
	tDot
	tFilter
	tFlatten
	tLparen
	tRparen
	tLbracket
	tRbracket
	tLbrace
	tRbrace
	tOr
	tPipe
	tNumber
	tUnquotedIdentifier
	tQuotedIdentifier
	tComma
	tColon
	tLT
	tLTE
	tGT
	tGTE
	tEQ
	tNE
	tJSONLiteral
	tStringLiteral
	tCurrent
	tExpref
	tAnd
	tNot
	tEOF
)

// basicTokens maps single-rune tokens directly to their token type.
var basicTokens = map[rune]tokType{
	'.': tDot,
	'*': tStar,
	',': tComma,
	':': tColon,
	'{': tLbrace,
	'}': tRbrace,
	']': tRbracket, // tLbracket not included because it could be "[]"
	'(': tLparen,
	')': tRparen,
	'@': tCurrent,
}

// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270

// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}

// whiteSpace lists the rune values the tokenizer silently skips.
var whiteSpace = map[rune]bool{
	' ': true, '\t': true, '\n': true, '\r': true,
}
+
// String renders the token, including its position and length, for
// debugging output.
func (t token) String() string {
	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
		t.tokenType, t.value, t.position, t.length)
}

// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
	lexer := Lexer{}
	return &lexer
}
+
// next returns the next rune in the expression and advances currentPos.
// It returns eof once the input is exhausted (and sets lastWidth to 0,
// which callers use to detect running off the end).
func (lexer *Lexer) next() rune {
	if lexer.currentPos >= len(lexer.expression) {
		lexer.lastWidth = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
	lexer.lastWidth = w
	lexer.currentPos += w
	return r
}

// back un-reads the rune most recently returned by next. Only one step
// of backtracking is supported, since only the last rune's width is kept.
func (lexer *Lexer) back() {
	lexer.currentPos -= lexer.lastWidth
}

// peek returns the next rune without consuming it.
func (lexer *Lexer) peek() rune {
	t := lexer.next()
	lexer.back()
	return t
}
+
// tokenize takes an expression and returns corresponding tokens.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
	var tokens []token
	lexer.expression = expression
	lexer.currentPos = 0
	lexer.lastWidth = 0
loop:
	for {
		r := lexer.next()
		// Identifier-start check: shift the rune down 64 so ASCII letters
		// and '_' land in the identifierStartBits word. Runes below 64
		// (and eof) produce an out-of-range shift that yields 0 in Go.
		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
			t := lexer.consumeUnquotedIdentifier()
			tokens = append(tokens, t)
		} else if val, ok := basicTokens[r]; ok {
			// Basic single char token.
			t := token{
				tokenType: val,
				value:     string(r),
				position:  lexer.currentPos - lexer.lastWidth,
				length:    1,
			}
			tokens = append(tokens, t)
		} else if r == '-' || (r >= '0' && r <= '9') {
			t := lexer.consumeNumber()
			tokens = append(tokens, t)
		} else if r == '[' {
			// "[" may begin "[", "[]" or "[?".
			t := lexer.consumeLBracket()
			tokens = append(tokens, t)
		} else if r == '"' {
			t, err := lexer.consumeQuotedIdentifier()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '\'' {
			t, err := lexer.consumeRawStringLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '`' {
			t, err := lexer.consumeLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '|' {
			t := lexer.matchOrElse(r, '|', tOr, tPipe)
			tokens = append(tokens, t)
		} else if r == '<' {
			t := lexer.matchOrElse(r, '=', tLTE, tLT)
			tokens = append(tokens, t)
		} else if r == '>' {
			t := lexer.matchOrElse(r, '=', tGTE, tGT)
			tokens = append(tokens, t)
		} else if r == '!' {
			t := lexer.matchOrElse(r, '=', tNE, tNot)
			tokens = append(tokens, t)
		} else if r == '=' {
			// A lone "=" is not a valid token; it lexes as tUnknown.
			t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
			tokens = append(tokens, t)
		} else if r == '&' {
			t := lexer.matchOrElse(r, '&', tAnd, tExpref)
			tokens = append(tokens, t)
		} else if r == eof {
			break loop
		} else if _, ok := whiteSpace[r]; ok {
			// Ignore whitespace
		} else {
			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
		}
	}
	// Every token stream ends with an explicit tEOF marker so the parser
	// can look ahead without running off the end.
	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
	return tokens, nil
}
+
// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
	start := lexer.currentPos
	current := lexer.next()
	for current != end && current != eof {
		if current == '\\' && lexer.peek() != eof {
			// Skip the escaped rune so an escaped delimiter does not
			// terminate the scan.
			lexer.next()
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return "", SyntaxError{
			msg:        "Unclosed delimiter: " + string(end),
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
+
// consumeLiteral lexes a backtick-delimited JSON literal (`...`).
// The opening backtick has already been consumed by the caller.
// Escaped backticks inside the literal are unescaped before storing.
func (lexer *Lexer) consumeLiteral() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('`')
	if err != nil {
		return token{}, err
	}
	value = strings.Replace(value, "\\`", "`", -1)
	return token{
		tokenType: tJSONLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
+
// consumeRawStringLiteral lexes a single-quoted raw string ('...').
// The opening quote has already been consumed by the caller. The only
// escape sequence honored is \' (escaped single quote); everything else
// is taken verbatim.
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
	start := lexer.currentPos
	currentIndex := start
	current := lexer.next()
	for current != '\'' && lexer.peek() != eof {
		if current == '\\' && lexer.peek() == '\'' {
			// Flush the chunk preceding the escape, then emit a literal
			// quote and skip past the escaped rune.
			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
			lexer.buf.WriteString(chunk)
			lexer.buf.WriteString("'")
			lexer.next()
			currentIndex = lexer.currentPos
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return token{}, SyntaxError{
			msg:        "Unclosed delimiter: '",
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	if currentIndex < lexer.currentPos {
		// Flush the remaining literal text, excluding the closing quote.
		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
	}
	value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
	lexer.buf.Reset()
	return token{
		tokenType: tStringLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
+
// syntaxError builds a SyntaxError anchored at the position of the rune
// that was just consumed (currentPos - 1).
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: lexer.expression,
		Offset:     lexer.currentPos - 1,
	}
}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There's three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare rbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 0000000..1240a17
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
// astNodeType identifies the kind of a node in the parsed AST.
type astNodeType int

//go:generate stringer -type astNodeType
// NOTE: the order of these constants must stay in sync with the
// generated astnodetype stringer output.
const (
	ASTEmpty astNodeType = iota
	ASTComparator
	ASTCurrentNode
	ASTExpRef
	ASTFunctionExpression
	ASTField
	ASTFilterProjection
	ASTFlatten
	ASTIdentity
	ASTIndex
	ASTIndexExpression
	ASTKeyValPair
	ASTLiteral
	ASTMultiSelectHash
	ASTMultiSelectList
	ASTOrExpression
	ASTAndExpression
	ASTNotExpression
	ASTPipe
	ASTProjection
	ASTSubexpression
	ASTSlice
	ASTValueProjection
)

// ASTNode represents the abstract syntax tree of a JMESPath expression.
type ASTNode struct {
	nodeType astNodeType // which kind of node this is
	value    interface{} // node-specific payload (field name, index, slice parts, ...)
	children []ASTNode   // child nodes; meaning depends on nodeType
}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ lastIndex := len(node.children)
+ if lastIndex > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
// bindingPowers assigns Pratt-parser precedence to each token type.
// Tokens with power 0 terminate expressions; higher values bind tighter.
var bindingPowers = map[tokType]int{
	tEOF:                0,
	tUnquotedIdentifier: 0,
	tQuotedIdentifier:   0,
	tRbracket:           0,
	tRparen:             0,
	tComma:              0,
	tRbrace:             0,
	tNumber:             0,
	tCurrent:            0,
	tExpref:             0,
	tColon:              0,
	tPipe:               1,
	tOr:                 2,
	tAnd:                3,
	tEQ:                 5,
	tLT:                 5,
	tLTE:                5,
	tGT:                 5,
	tGTE:                5,
	tNE:                 5,
	tFlatten:            9,
	tStar:               20,
	tFilter:             21,
	tDot:                40,
	tNot:                45,
	tLbrace:             50,
	tLbracket:           55,
	tLparen:             60,
}

// Parser holds state about the current expression being parsed.
type Parser struct {
	expression string  // the raw expression text, kept for error messages
	tokens     []token // token stream produced by the lexer
	index      int     // index of the current token
}

// NewParser creates a new JMESPath parser.
func NewParser() *Parser {
	p := Parser{}
	return &p
}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+ "Unexpected token at the end of the expresssion: %s", p.current()))
+ }
+ return parsed, nil
+}
+
// parseExpression is the core Pratt (top-down operator precedence) loop:
// parse a prefix construct via nud, then keep folding in infix/postfix
// constructs via led while their binding power exceeds bindingPower.
func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
	var err error
	leftToken := p.lookaheadToken(0)
	p.advance()
	leftNode, err := p.nud(leftToken)
	if err != nil {
		return ASTNode{}, err
	}
	currentToken := p.current()
	for bindingPower < bindingPowers[currentToken] {
		p.advance()
		leftNode, err = p.led(currentToken, leftNode)
		if err != nil {
			return ASTNode{}, err
		}
		currentToken = p.current()
	}
	return leftNode, nil
}
+
// parseIndexExpression parses the contents of "[...]" after the opening
// bracket: either a slice ("start:stop:step", detected by a colon in the
// next two tokens) or a plain numeric index followed by "]".
func (p *Parser) parseIndexExpression() (ASTNode, error) {
	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
		return p.parseSliceExpression()
	}
	indexStr := p.lookaheadToken(0).value
	parsedInt, err := strconv.Atoi(indexStr)
	if err != nil {
		return ASTNode{}, err
	}
	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
	p.advance()
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return indexNode, nil
}
+
// parseSliceExpression parses "start:stop:step" inside brackets into an
// ASTSlice node whose value is a []*int of length 3; nil entries mean
// the corresponding part was omitted.
func (p *Parser) parseSliceExpression() (ASTNode, error) {
	parts := []*int{nil, nil, nil}
	index := 0
	current := p.current()
	for current != tRbracket && index < 3 {
		if current == tColon {
			index++
			p.advance()
		} else if current == tNumber {
			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
			if err != nil {
				return ASTNode{}, err
			}
			// parsedInt is declared fresh each iteration, so taking
			// its address here is safe.
			parts[index] = &parsedInt
			p.advance()
		} else {
			return ASTNode{}, p.syntaxError(
				"Expected tColon or tNumber" + ", received: " + p.current().String())
		}
		current = p.current()
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTSlice,
		value:    parts,
	}, nil
}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
// led is the "left denotation" of the Pratt parser: given an already
// parsed left-hand node and the infix/postfix token that follows it,
// parse the remainder of the construct (subexpressions, projections,
// comparators, function calls, ...).
func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
	switch tokenType {
	case tDot:
		if p.current() != tStar {
			// Plain "a.b" subexpression.
			right, err := p.parseDotRHS(bindingPowers[tDot])
			return ASTNode{
				nodeType: ASTSubexpression,
				children: []ASTNode{node, right},
			}, err
		}
		// "a.*" — a value projection over the object's values.
		p.advance()
		right, err := p.parseProjectionRHS(bindingPowers[tDot])
		return ASTNode{
			nodeType: ASTValueProjection,
			children: []ASTNode{node, right},
		}, err
	case tPipe:
		right, err := p.parseExpression(bindingPowers[tPipe])
		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
	case tOr:
		right, err := p.parseExpression(bindingPowers[tOr])
		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
	case tAnd:
		right, err := p.parseExpression(bindingPowers[tAnd])
		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
	case tLparen:
		// Function call: the left node holds the function name.
		name := node.value
		var args []ASTNode
		for p.current() != tRparen {
			expression, err := p.parseExpression(0)
			if err != nil {
				return ASTNode{}, err
			}
			if p.current() == tComma {
				if err := p.match(tComma); err != nil {
					return ASTNode{}, err
				}
			}
			args = append(args, expression)
		}
		if err := p.match(tRparen); err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTFunctionExpression,
			value:    name,
			children: args,
		}, nil
	case tFilter:
		return p.parseFilter(node)
	case tFlatten:
		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{left, right},
		}, err
	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
		right, err := p.parseExpression(bindingPowers[tokenType])
		if err != nil {
			return ASTNode{}, err
		}
		// The comparator kind is recorded in the node's value.
		return ASTNode{
			nodeType: ASTComparator,
			value:    tokenType,
			children: []ASTNode{node, right},
		}, nil
	case tLbracket:
		tokenType := p.current()
		var right ASTNode
		var err error
		if tokenType == tNumber || tokenType == tColon {
			// Index or slice applied to the left node.
			right, err = p.parseIndexExpression()
			if err != nil {
				return ASTNode{}, err
			}
			return p.projectIfSlice(node, right)
		}
		// Otherwise this is a projection.
		if err := p.match(tStar); err != nil {
			return ASTNode{}, err
		}
		if err := p.match(tRbracket); err != nil {
			return ASTNode{}, err
		}
		right, err = p.parseProjectionRHS(bindingPowers[tStar])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{node, right},
		}, nil
	}
	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ //var right ASTNode
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
// parseMultiSelectList parses "[expr, expr, ...]" (the opening bracket
// already consumed) into an ASTMultiSelectList node. At least one
// expression is required; an immediate "]" is a syntax error raised by
// the first parseExpression call.
func (p *Parser) parseMultiSelectList() (ASTNode, error) {
	var expressions []ASTNode
	for {
		expression, err := p.parseExpression(0)
		if err != nil {
			return ASTNode{}, err
		}
		expressions = append(expressions, expression)
		if p.current() == tRbracket {
			break
		}
		err = p.match(tComma)
		if err != nil {
			return ASTNode{}, err
		}
	}
	err := p.match(tRbracket)
	if err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTMultiSelectList,
		children: expressions,
	}, nil
}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
// projectIfSlice wraps left/right in an ASTIndexExpression and, when the
// right side is a slice, additionally wraps the result in a projection:
// slices project over their elements, while plain indexes do not.
func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
	indexExpr := ASTNode{
		nodeType: ASTIndexExpression,
		children: []ASTNode{left, right},
	}
	if right.nodeType == ASTSlice {
		right, err := p.parseProjectionRHS(bindingPowers[tStar])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{indexExpr, right},
		}, err
	}
	return indexExpr, nil
}
// parseFilter parses the remainder of a filter expression "[?cond]"
// applied to node (the "[?" token already consumed). The projection RHS
// is parsed next, unless the following token is a flatten, in which case
// the identity node is used so the flatten gets the unprojected result.
func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
	var right, condition ASTNode
	var err error
	condition, err = p.parseExpression(0)
	if err != nil {
		return ASTNode{}, err
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	if p.current() == tFlatten {
		right = ASTNode{nodeType: ASTIdentity}
	} else {
		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
		if err != nil {
			return ASTNode{}, err
		}
	}

	// children layout: [0]=source, [1]=projection RHS, [2]=condition.
	return ASTNode{
		nodeType: ASTFilterProjection,
		children: []ASTNode{node, right, condition},
	}, nil
}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
// lookahead returns the type of the token `number` positions ahead.
func (p *Parser) lookahead(number int) tokType {
	return p.lookaheadToken(number).tokenType
}

// current returns the type of the token at the current parse position.
func (p *Parser) current() tokType {
	return p.lookahead(0)
}

// lookaheadToken returns the token `number` positions ahead without
// consuming anything. tokenize always appends a trailing tEOF token,
// which the parser treats as a stopper.
func (p *Parser) lookaheadToken(number int) token {
	return p.tokens[p.index+number]
}

// advance consumes one token.
func (p *Parser) advance() {
	p.index++
}

// tokensOneOf reports whether token appears in elements.
func tokensOneOf(elements []tokType, token tokType) bool {
	for _, elem := range elements {
		if elem == token {
			return true
		}
	}
	return false
}

// syntaxError creates a SyntaxError anchored at the current lookahead
// token's position.
func (p *Parser) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     p.lookaheadToken(0).position,
	}
}

// Create a SyntaxError based on the provided token.
// This differs from syntaxError() which creates a SyntaxError
// based on the current lookahead token.
func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     t.position,
	}
}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 0000000..dae79cb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"

var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}

// String returns the stringer-generated name for the token type;
// out-of-range values render as "tokType(N)".
func (i tokType) String() string {
	if i < 0 || i >= tokType(len(_tokType_index)-1) {
		return fmt.Sprintf("tokType(%d)", i)
	}
	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 0000000..ddc1b7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
// isFalse determines if an object is false based on the JMESPath spec.
// JMESPath defines false values to be any of:
//   - An empty string, array, or hash.
//   - The boolean value false.
//   - nil
//
// Values of slice/map/pointer kinds outside the common interface shapes
// are handled through reflection; a nil pointer is false, and a non-nil
// pointer is evaluated against its pointee.
func isFalse(value interface{}) bool {
	switch typed := value.(type) {
	case nil:
		return true
	case bool:
		return !typed
	case string:
		return typed == ""
	case []interface{}:
		return len(typed) == 0
	case map[string]interface{}:
		return len(typed) == 0
	}
	// Fall back to reflection for typed slices, maps, and pointers.
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Slice, reflect.Map:
		return rv.Len() == 0
	case reflect.Ptr:
		if rv.IsNil() {
			return true
		}
		// Dereference and evaluate the pointee.
		return isFalse(rv.Elem().Interface())
	case reflect.Struct:
		// A struct type will never be false, even if
		// all of its values are the zero type.
		return false
	}
	return false
}
+
// ObjsEqual is a generic object equality check.
// It will take two arbitrary objects and recursively determine
// if they are equal. Delegates to reflect.DeepEqual, so values of
// different dynamic types are never equal.
func objsEqual(left interface{}, right interface{}) bool {
	return reflect.DeepEqual(left, right)
}
+
// sliceParam refers to a single part of a slice.
// A slice consists of a start, a stop, and a step, similar to
// python slices.
type sliceParam struct {
	N         int
	Specified bool
}

// slice supports [start:stop:step] style slicing as defined by JMESPath.
// parts must have length 3; unspecified parts take python-style defaults.
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
	bounds, err := computeSliceParams(len(slice), parts)
	if err != nil {
		return nil, err
	}
	start, stop, step := bounds[0], bounds[1], bounds[2]
	result := []interface{}{}
	if step > 0 {
		for i := start; i < stop; i += step {
			result = append(result, slice[i])
		}
		return result, nil
	}
	// Negative step walks backwards; stop is exclusive on the low side.
	for i := start; i > stop; i += step {
		result = append(result, slice[i])
	}
	return result, nil
}

// computeSliceParams resolves the three optional slice parts against the
// input length, producing concrete [start, stop, step] values with
// python slicing semantics (negative indices count from the end).
func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
	step := 1
	if parts[2].Specified {
		if parts[2].N == 0 {
			return nil, errors.New("Invalid slice, step cannot be 0")
		}
		step = parts[2].N
	}
	stepNegative := step < 0

	// Default start is 0 going forward, or the last index going backward.
	start := 0
	if parts[0].Specified {
		start = capSlice(length, parts[0].N, step)
	} else if stepNegative {
		start = length - 1
	}

	// Default stop is length going forward, or -1 going backward.
	stop := length
	if parts[1].Specified {
		stop = capSlice(length, parts[1].N, step)
	} else if stepNegative {
		stop = -1
	}
	return []int{start, stop, step}, nil
}

// capSlice clamps a slice bound into a valid range for the given length,
// translating negative indices and respecting the step direction.
func capSlice(length int, actual int, step int) int {
	switch {
	case actual < 0:
		actual += length
		if actual < 0 {
			if step < 0 {
				return -1
			}
			return 0
		}
	case actual >= length:
		if step < 0 {
			return length - 1
		}
		return length
	}
	return actual
}
+
// toArrayNum converts an empty interface type to a slice of float64.
// If the value is not a []interface{} or any element in the array cannot
// be converted to float64, nil and false are returned.
func toArrayNum(data interface{}) ([]float64, bool) {
	arr, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	out := make([]float64, len(arr))
	for i := range arr {
		num, isNum := arr[i].(float64)
		if !isNum {
			return nil, false
		}
		out[i] = num
	}
	return out, true
}
+
// toArrayStr converts an empty interface type to a slice of strings.
// If the value is not a []interface{} or any element cannot be converted
// to string, nil and false are returned; otherwise the converted slice
// and true are returned.
func toArrayStr(data interface{}) ([]string, bool) {
	arr, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	out := make([]string, len(arr))
	for i := range arr {
		s, isStr := arr[i].(string)
		if !isStr {
			return nil, false
		}
		out[i] = s
	}
	return out, true
}
+
// isSliceType reports whether v's dynamic type is a slice.
// nil is not considered a slice.
func isSliceType(v interface{}) bool {
	if v == nil {
		return false
	}
	kind := reflect.TypeOf(v).Kind()
	return kind == reflect.Slice
}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
new file mode 100644
index 0000000..20dd53b
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+sudo: false
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
+notifications:
+ email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 0000000..926d549
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644
index 0000000..7b1a722
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.svg?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.svg)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both the creation and parsing of UUIDs are supported.
+
+With 100% test coverage and benchmarks out of box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+ $ go get github.com/satori/go.uuid
+
+## Requirements
+
+UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/satori/go.uuid"
+)
+
+func main() {
+ // Creating UUID Version 4
+ u1 := uuid.NewV4()
+ fmt.Printf("UUIDv4: %s\n", u1)
+
+ // Parsing UUID from string input
+ u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ if err != nil {
+ fmt.Printf("Something gone wrong: %s", err)
+ }
+ fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2018 by Maxim Bublis.
+
+UUID package released under MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
new file mode 100644
index 0000000..656892c
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/codec.go
@@ -0,0 +1,206 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+)
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+ err = u.UnmarshalBinary(input)
+ return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+	if u, err := FromBytes(input); err == nil {
+		return u
+	}
+	return Nil
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+ err = u.UnmarshalText([]byte(input))
+ return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+	if u, err := FromString(input); err == nil {
+		return u
+	}
+	return Nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() ([]byte, error) {
+	return []byte(u.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Following formats are supported:
+//   "6ba7b810-9dad-11d1-80b4-00c04fd430c8"          (canonical, 36 bytes)
+//   "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"        (braced, 38 bytes)
+//   "{6ba7b8109dad11d180b400c04fd430c8}"            (braced hash-like, 34 bytes)
+//   "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" (URN, 45 bytes)
+//   "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"     (URN hash-like, 41 bytes)
+//   "6ba7b8109dad11d180b400c04fd430c8"              (hash-like, 32 bytes)
+// Dispatch is by input length only; each decoder validates the
+// remaining structure and returns a descriptive error on mismatch.
+func (u *UUID) UnmarshalText(text []byte) error {
+	switch len(text) {
+	case 32:
+		return u.decodeHashLike(text)
+	// Bug fix: the grammar documented here (braced := '{' plain '}')
+	// admits a braced hash-like form of length 34, and decodeBraced
+	// explicitly supports it, but length 34 was previously rejected.
+	case 34, 38:
+		return u.decodeBraced(text)
+	case 36:
+		return u.decodeCanonical(text)
+	case 41, 45:
+		return u.decodeURN(text)
+	default:
+		return fmt.Errorf("uuid: incorrect UUID length: %s", text)
+	}
+}
+
+// decodeCanonical decodes UUID string in format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) (err error) {
+ // Reject early unless all four separators sit at their fixed offsets.
+ if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ src := t[:]
+ dst := u[:]
+
+ // byteGroups is {8, 4, 4, 4, 12}: the hex-digit counts of the five
+ // canonical groups. src and dst advance in lockstep; each group of
+ // byteGroup hex digits decodes into byteGroup/2 raw bytes.
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ src = src[1:] // skip dash
+ }
+ _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+ if err != nil {
+ return
+ }
+ src = src[byteGroup:]
+ dst = dst[byteGroup/2:]
+ }
+
+ return
+}
+
+// decodeHashLike decodes UUID string in format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) error {
+	// 32 hex digits decode directly into the 16 UUID bytes; hex.Decode
+	// reports any non-hex character as an error.
+	_, err := hex.Decode(u[:], t)
+	return err
+}
+
+// decodeBraced decodes UUID string in format
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) error {
+	last := len(t) - 1
+	if t[0] != '{' || t[last] != '}' {
+		return fmt.Errorf("uuid: incorrect UUID format %s", t)
+	}
+	// Strip the braces and let decodePlain dispatch on the inner length.
+	return u.decodePlain(t[1:last])
+}
+
+// decodeURN decodes UUID string in format
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) error {
+	// Renamed from urn_uuid_prefix: Go identifiers use mixedCaps, not
+	// underscores. The first 9 bytes must be the literal "urn:uuid:".
+	urnUUIDPrefix := t[:9]
+	if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
+		return fmt.Errorf("uuid: incorrect UUID format: %s", t)
+	}
+	// Everything after the prefix is a plain (canonical or hash-like)
+	// UUID; t[9:] replaces the redundant t[9:len(t)] spelling.
+	return u.decodePlain(t[9:])
+}
+
+// decodePlain decodes UUID string in canonical format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) error {
+	switch len(t) {
+	case 32:
+		return u.decodeHashLike(t)
+	case 36:
+		return u.decodeCanonical(t)
+	}
+	// Typo fixed: "incorrrect" -> "incorrect", matching the message
+	// emitted by UnmarshalText for bad top-level lengths.
+	return fmt.Errorf("uuid: incorrect UUID length: %s", t)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+// It never fails; the result is the raw 16-byte form.
+func (u UUID) MarshalBinary() ([]byte, error) {
+	return u.Bytes(), nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != Size {
+		return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+	}
+	copy(u[:], data)
+	return nil
+}
diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
new file mode 100644
index 0000000..3f2f1da
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/generator.go
@@ -0,0 +1,239 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "hash"
+ "net"
+ "os"
+ "sync"
+ "time"
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+var (
+ // global is the package-level generator backing the exported
+ // NewV1..NewV5 helpers.
+ global = newDefaultGenerator()
+
+ // epochFunc is an indirection over unixTimeFunc, the default
+ // timestamp source for V1/V2 UUIDs.
+ epochFunc = unixTimeFunc
+ // POSIX UID/GID captured once at package init; embedded into
+ // DCE Security (V2) UUIDs.
+ posixUID = uint32(os.Getuid())
+ posixGID = uint32(os.Getgid())
+)
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+// It delegates to the package-level default generator.
+func NewV1() UUID {
+ return global.NewV1()
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+// It delegates to the package-level default generator.
+func NewV2(domain byte) UUID {
+ return global.NewV2(domain)
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+// It delegates to the package-level default generator.
+func NewV3(ns UUID, name string) UUID {
+ return global.NewV3(ns, name)
+}
+
+// NewV4 returns random generated UUID.
+// It delegates to the package-level default generator.
+func NewV4() UUID {
+ return global.NewV4()
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+// It delegates to the package-level default generator.
+func NewV5(ns UUID, name string) UUID {
+ return global.NewV5(ns, name)
+}
+
+// Generator provides interface for generating UUIDs.
+type Generator interface {
+ NewV1() UUID // timestamp + MAC address (RFC 4122)
+ NewV2(domain byte) UUID // DCE Security: POSIX UID/GID (DCE 1.1)
+ NewV3(ns UUID, name string) UUID // MD5 hash of namespace + name (RFC 4122)
+ NewV4() UUID // random (RFC 4122)
+ NewV5(ns UUID, name string) UUID // SHA-1 hash of namespace + name (RFC 4122)
+}
+
+// Default generator implementation.
+type generator struct {
+ // storageOnce guards the one-time init of clock sequence and
+ // hardware address; storageMutex serializes per-UUID state updates.
+ storageOnce sync.Once
+ storageMutex sync.Mutex
+
+ // lastTime is the last timestamp handed out (100-ns intervals since
+ // the UUID epoch); clockSequence is bumped whenever time does not
+ // move forward between requests (see getStorage).
+ lastTime uint64
+ clockSequence uint16
+ // hardwareAddr is the 6-byte node ID (a real MAC if one is found,
+ // otherwise random bytes with the multicast bit set).
+ hardwareAddr [6]byte
+}
+
+// newDefaultGenerator returns a zero-valued generator; all state is
+// initialized lazily on the first V1/V2 request.
+func newDefaultGenerator() Generator {
+ return &generator{}
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func (g *generator) NewV1() UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ // RFC 4122 layout: time_low (bytes 0-3), time_mid (4-5),
+ // time_hi (6-7), clock sequence (8-9), node/MAC (10-15).
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ copy(u[10:], hardwareAddr)
+
+ // Version/variant bits overwrite the top bits of time_hi and the
+ // clock sequence, per the RFC.
+ u.SetVersion(V1)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func (g *generator) NewV2(domain byte) UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ // DCE 1.1: the low 32 timestamp bits are replaced by the local
+ // identifier (UID or GID, per the requested domain).
+ // NOTE(review): DomainOrg leaves bytes 0-3 zero — this API has no
+ // source for an org ID; confirm whether that is intended upstream.
+ switch domain {
+ case DomainPerson:
+ binary.BigEndian.PutUint32(u[0:], posixUID)
+ case DomainGroup:
+ binary.BigEndian.PutUint32(u[0:], posixGID)
+ }
+
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+ // The low clock-sequence byte carries the domain identifier.
+ u[9] = domain
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V2)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+// The receiver carries no state for hash-based versions.
+func (g *generator) NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(V3)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV4 returns random generated UUID.
+func (g *generator) NewV4() UUID {
+ u := UUID{}
+ // All 16 bytes come from crypto/rand; the version/variant setters
+ // then overwrite 6 of the 128 bits.
+ g.safeRandom(u[:])
+ u.SetVersion(V4)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+// The receiver carries no state for hash-based versions.
+func (g *generator) NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(V5)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// initStorage performs the one-time setup of V1/V2 state; invoked via
+// storageOnce from getStorage.
+func (g *generator) initStorage() {
+ g.initClockSequence()
+ g.initHardwareAddr()
+}
+
+// initClockSequence seeds the 16-bit clock sequence from crypto/rand,
+// as RFC 4122 recommends for a node without stable clock-seq storage.
+func (g *generator) initClockSequence() {
+ buf := make([]byte, 2)
+ g.safeRandom(buf)
+ g.clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+// initHardwareAddr picks the node ID: the first network interface
+// reporting a hardware address of at least 6 bytes, or random bytes
+// (with the multicast bit set) when none is available.
+func (g *generator) initHardwareAddr() {
+ interfaces, err := net.Interfaces()
+ if err == nil {
+ for _, iface := range interfaces {
+ if len(iface.HardwareAddr) >= 6 {
+ copy(g.hardwareAddr[:], iface.HardwareAddr)
+ return
+ }
+ }
+ }
+
+ // Initialize hardwareAddr randomly in case
+ // of real network interfaces absence
+ g.safeRandom(g.hardwareAddr[:])
+
+ // Set multicast bit as recommended in RFC 4122
+ // (distinguishes a random node ID from any real MAC).
+ g.hardwareAddr[0] |= 0x01
+}
+
+// safeRandom fills dest from crypto/rand, panicking if the system
+// source of randomness fails — UUID generation cannot proceed without it.
+func (g *generator) safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+// getStorage returns the UUID v1/v2 generation state: epoch timestamp,
+// clock sequence, and hardware address. The first call triggers the
+// one-time initialization; the mutex serializes timestamp/sequence
+// updates across goroutines.
+func (g *generator) getStorage() (uint64, uint16, []byte) {
+ g.storageOnce.Do(g.initStorage)
+
+ g.storageMutex.Lock()
+ defer g.storageMutex.Unlock()
+
+ timeNow := epochFunc()
+ // Clock changed backwards since last UUID generation.
+ // Should increase clock sequence.
+ // (The <= also bumps the sequence when two requests land on the same
+ // 100-ns tick, preserving uniqueness.)
+ if timeNow <= g.lastTime {
+ g.clockSequence++
+ }
+ g.lastTime = timeNow
+
+ return timeNow, g.clockSequence, g.hardwareAddr[:]
+}
+
+// unixTimeFunc returns the difference in 100-nanosecond intervals
+// between the UUID epoch (October 15, 1582) and the current time.
+// It is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+	intervals := uint64(time.Now().UnixNano() / 100)
+	return epochStart + intervals
+}
+
+// newFromHash returns UUID based on hashing of namespace UUID and name
+// (RFC 4122 name-based form: hash over namespace bytes then name).
+// copy truncates to the first 16 bytes, which matters for SHA-1's
+// 20-byte digest; hash.Hash.Write is documented never to return an error.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
new file mode 100644
index 0000000..56759d3
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/sql.go
@@ -0,0 +1,78 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Value implements the driver.Valuer interface.
+// The UUID is stored in its canonical 36-character string form.
+func (u UUID) Value() (driver.Value, error) {
+ return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+	switch v := src.(type) {
+	case []byte:
+		if len(v) != Size {
+			return u.UnmarshalText(v)
+		}
+		return u.UnmarshalBinary(v)
+	case string:
+		return u.UnmarshalText([]byte(v))
+	default:
+		return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+	}
+}
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database.
+type NullUUID struct {
+ // UUID holds the value when Valid is true.
+ UUID UUID
+ // Valid is false when the database column was NULL.
+ Valid bool
+}
+
+// Value implements the driver.Valuer interface.
+// A NULL (invalid) value is stored as SQL NULL.
+func (u NullUUID) Value() (driver.Value, error) {
+	if u.Valid {
+		// Delegate to the non-null UUID's Valuer.
+		return u.UUID.Value()
+	}
+	return nil, nil
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+	if src == nil {
+		// Database NULL: reset to the zero UUID and mark invalid.
+		u.UUID, u.Valid = Nil, false
+		return nil
+	}
+	// Any non-nil source is delegated to the UUID scanner.
+	u.Valid = true
+	return u.UUID.Scan(src)
+}
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 0000000..a2b8e2c
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,161 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides implementation of Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+ _ byte = iota // skip 0 so V1..V5 match their numeric versions
+ V1
+ V2
+ V3
+ V4
+ V5
+)
+
+// UUID layout variants.
+const (
+ VariantNCS byte = iota
+ VariantRFC4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// UUID DCE domains.
+const (
+ DomainPerson = iota
+ DomainGroup
+ DomainOrg
+)
+
+// String parse helpers.
+var (
+ urnPrefix = []byte("urn:uuid:")
+ // byteGroups lists the hex-digit counts of the five groups in the
+ // canonical textual form (8-4-4-4-12).
+ byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+// Nil is special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+// These are the RFC 4122 Appendix C namespaces (DNS names, URLs,
+// ISO OIDs, X.500 DNs) for use with NewV3/NewV5.
+var (
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+)
+
+// Equal returns true if u1 and u2 equals, otherwise returns false.
+// Two UUIDs are equal when all 16 bytes match.
+func Equal(u1 UUID, u2 UUID) bool {
+	a, b := u1[:], u2[:]
+	return bytes.Equal(a, b)
+}
+
+// Version returns algorithm version used to generate UUID.
+// The version lives in the high nibble of byte 6.
+func (u UUID) Version() byte {
+ return u[6] >> 4
+}
+
+// Variant returns UUID layout variant.
+// The variant is encoded in the most significant bits of byte 8:
+// 0xx -> NCS, 10x -> RFC 4122, 110 -> Microsoft, 111 -> future.
+func (u UUID) Variant() byte {
+ switch {
+ case (u[8] >> 7) == 0x00:
+ return VariantNCS
+ case (u[8] >> 6) == 0x02:
+ return VariantRFC4122
+ case (u[8] >> 5) == 0x06:
+ return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
+ }
+}
+
+// Bytes returns bytes slice representation of UUID.
+// The slice is backed by a copy of the receiver (value receiver), so
+// mutating it does not affect the original UUID.
+func (u UUID) Bytes() []byte {
+ return u[:]
+}
+
+// String returns the canonical string representation of UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ // 32 hex digits plus 4 dashes at fixed offsets 8, 13, 18, 23.
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
+
+// SetVersion sets version bits.
+// The high nibble of byte 6 becomes v; the low nibble is preserved.
+func (u *UUID) SetVersion(v byte) {
+ u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets variant bits.
+// Each case masks off the variant-significant high bits of byte 8 and
+// installs the bit pattern for the requested layout; unknown values
+// fall back to the "future" pattern 111.
+func (u *UUID) SetVariant(v byte) {
+ switch v {
+ case VariantNCS:
+ u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+ case VariantRFC4122:
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+ case VariantMicrosoft:
+ u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+ case VariantFuture:
+ fallthrough
+ default:
+ u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
+ }
+}
+
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//	var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
+func Must(u UUID, err error) UUID {
+	if err == nil {
+		return u
+	}
+	panic(err)
+}