From f26c1d4a1fb7650212526e070a2ddfa88d3a94b8 Mon Sep 17 00:00:00 2001
From: faker
Date: Wed, 20 Nov 2019 21:54:42 +0800
Subject: [PATCH 1/7] Switch to go mod
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../AdminControllers/BannerController.go | 10 +-
 .../AdminControllers/BaseController.go | 10 +-
 controllers/AdminControllers/DocController.go | 9 +-
 .../AdminControllers/FriendController.go | 6 +-
 .../AdminControllers/KindEditorController.go | 4 +-
 .../AdminControllers/LoginController.go | 6 +-
 .../AdminControllers/ReportController.go | 2 +-
 .../AdminControllers/ScoreController.go | 2 +-
 controllers/AdminControllers/SeoController.go | 2 +-
 .../AdminControllers/SingleController.go | 3 +-
 controllers/AdminControllers/SysController.go | 14 +-
 .../AdminControllers/UserController.go | 8 +-
 controllers/HomeControllers/BaseController.go | 8 +-
 .../HomeControllers/CollectController.go | 5 +-
 .../HomeControllers/IndexController.go | 6 +-
 .../HomeControllers/InstallController.go | 5 +-
 controllers/HomeControllers/ListController.go | 11 +-
 .../HomeControllers/ReportController.go | 5 +-
 .../HomeControllers/SearchController.go | 8 +-
 .../HomeControllers/StaticController.go | 3 +-
 .../HomeControllers/UploadController.go | 8 +-
 controllers/HomeControllers/UserController.go | 15 +-
 controllers/HomeControllers/ViewController.go | 7 +-
 go.mod | 31 +
 helper/config.go | 7 +-
 helper/convert.go | 3 +-
 helper/helper.go | 38 +-
 helper/logs.go | 3 +-
 main.go | 15 +-
 models/AdminModel.go | 3 +-
 models/BannerModel.go | 3 +-
 models/CategoryModel.go | 3 +-
 models/CloudStoreModel.go | 4 +-
 models/CollectModel.go | 3 +-
 models/ConfigModel.go | 5 +-
 models/DocumentModel.go | 10 +-
 models/ElasticSearchModel.go | 14 +-
 models/Install.go | 3 +-
 models/Models.go | 21 +-
 models/RecycleModel.go | 4 +-
 models/RemarkModel.go | 4 +-
 models/SeoModel.go | 12 +-
 models/UserModel.go | 4 +-
 models/Utils.go | 4 +-
 models/WordModel.go | 3 +-
 routers/router.go | 6 +-
 .../TruthHun/gotil/cryptil/cryptil.go | 70 -
 .../TruthHun/gotil/cryptil/readme.md | 1 -
 vendor/github.com/TruthHun/gotil/util/util.go | 222 -
 vendor/github.com/adamzy/cedar-go/LICENSE.md | 339 --
 vendor/github.com/adamzy/cedar-go/README.md | 83 -
 vendor/github.com/adamzy/cedar-go/api.go | 231 -
 vendor/github.com/adamzy/cedar-go/cedar.go | 407 --
 vendor/github.com/adamzy/cedar-go/doc.go | 12 -
 vendor/github.com/adamzy/cedar-go/errors.go | 11 -
 vendor/github.com/adamzy/cedar-go/io.go | 63 -
 .../aliyun/aliyun-oss-go-sdk/CHANGELOG.md | 89 -
 .../aliyun/aliyun-oss-go-sdk/README-CN.md | 167 -
 .../aliyun/aliyun-oss-go-sdk/README.md | 166 -
 .../aliyun/aliyun-oss-go-sdk/oss/auth.go | 92 -
 .../aliyun/aliyun-oss-go-sdk/oss/bucket.go | 618 ---
 .../aliyun/aliyun-oss-go-sdk/oss/client.go | 739 ---
 .../aliyun/aliyun-oss-go-sdk/oss/conf.go | 67 -
 .../aliyun/aliyun-oss-go-sdk/oss/conn.go | 420 --
 .../aliyun/aliyun-oss-go-sdk/oss/const.go | 82 -
 .../aliyun/aliyun-oss-go-sdk/oss/crc.go | 44 -
 .../aliyun/aliyun-oss-go-sdk/oss/download.go | 399 --
 .../aliyun/aliyun-oss-go-sdk/oss/error.go | 82 -
 .../aliyun/aliyun-oss-go-sdk/oss/mime.go | 245 -
 .../aliyun/aliyun-oss-go-sdk/oss/model.go | 60 -
 .../aliyun/aliyun-oss-go-sdk/oss/multicopy.go | 414 --
 .../aliyun/aliyun-oss-go-sdk/oss/multipart.go | 280 --
 .../aliyun/aliyun-oss-go-sdk/oss/option.go | 346 --
 .../aliyun/aliyun-oss-go-sdk/oss/type.go | 442 --
 .../aliyun/aliyun-oss-go-sdk/oss/upload.go | 438 --
 .../aliyun/aliyun-oss-go-sdk/oss/utils.go | 165 -
.../aliyun/aliyun-oss-go-sdk/sample.go | 36 - .../github.com/andybalholm/cascadia/LICENSE | 24 - .../github.com/andybalholm/cascadia/README.md | 7 - vendor/github.com/andybalholm/cascadia/go.mod | 3 - .../github.com/andybalholm/cascadia/parser.go | 835 ---- .../andybalholm/cascadia/selector.go | 622 --- vendor/github.com/baidubce/bce-sdk-go/LICENSE | 177 - .../github.com/baidubce/bce-sdk-go/README.md | 1589 ------ vendor/github.com/baidubce/bce-sdk-go/init.go | 29 - .../denverdino/aliyungo/LICENSE.txt | 191 - .../denverdino/aliyungo/common/client.go | 268 -- .../denverdino/aliyungo/common/endpoint.go | 118 - .../denverdino/aliyungo/common/endpoints.xml | 1351 ------ .../denverdino/aliyungo/common/regions.go | 34 - .../denverdino/aliyungo/common/request.go | 101 - .../denverdino/aliyungo/common/types.go | 89 - .../denverdino/aliyungo/common/version.go | 3 - .../aliyungo/oss/authenticate_callback.go | 92 - .../denverdino/aliyungo/oss/client.go | 1394 ------ .../denverdino/aliyungo/oss/export.go | 23 - .../denverdino/aliyungo/oss/multi.go | 489 -- .../denverdino/aliyungo/oss/regions.go | 78 - .../denverdino/aliyungo/oss/signature.go | 107 - .../denverdino/aliyungo/util/attempt.go | 76 - .../denverdino/aliyungo/util/encoding.go | 152 - .../denverdino/aliyungo/util/iso6801.go | 80 - .../denverdino/aliyungo/util/signature.go | 40 - .../denverdino/aliyungo/util/util.go | 147 - .../github.com/disintegration/imaging/LICENSE | 21 - .../disintegration/imaging/README.md | 198 - .../disintegration/imaging/adjust.go | 200 - .../disintegration/imaging/effects.go | 189 - .../disintegration/imaging/helpers.go | 400 -- .../disintegration/imaging/histogram.go | 43 - .../disintegration/imaging/resize.go | 585 --- .../disintegration/imaging/tools.go | 201 - .../disintegration/imaging/transform.go | 201 - .../disintegration/imaging/utils.go | 77 - vendor/github.com/go-sql-driver/mysql/AUTHORS | 52 - .../go-sql-driver/mysql/CHANGELOG.md | 103 - .../go-sql-driver/mysql/CONTRIBUTING.md | 23 - .../go-sql-driver/mysql/ISSUE_TEMPLATE.md | 21 - vendor/github.com/go-sql-driver/mysql/LICENSE | 373 -- .../mysql/PULL_REQUEST_TEMPLATE.md | 9 - .../github.com/go-sql-driver/mysql/README.md | 420 -- .../go-sql-driver/mysql/appengine.go | 19 - .../github.com/go-sql-driver/mysql/buffer.go | 147 - .../go-sql-driver/mysql/collations.go | 250 - .../go-sql-driver/mysql/connection.go | 372 -- .../github.com/go-sql-driver/mysql/const.go | 163 - .../github.com/go-sql-driver/mysql/driver.go | 167 - vendor/github.com/go-sql-driver/mysql/dsn.go | 513 -- .../github.com/go-sql-driver/mysql/errors.go | 131 - .../github.com/go-sql-driver/mysql/infile.go | 181 - .../github.com/go-sql-driver/mysql/packets.go | 1246 ----- .../github.com/go-sql-driver/mysql/result.go | 22 - vendor/github.com/go-sql-driver/mysql/rows.go | 112 - .../go-sql-driver/mysql/statement.go | 150 - .../go-sql-driver/mysql/transaction.go | 31 - .../github.com/go-sql-driver/mysql/utils.go | 740 --- vendor/github.com/huichen/sego/README.md | 43 - vendor/github.com/huichen/sego/dictionary.go | 65 - vendor/github.com/huichen/sego/license.txt | 13 - vendor/github.com/huichen/sego/segment.go | 28 - vendor/github.com/huichen/sego/segmenter.go | 295 -- vendor/github.com/huichen/sego/test_utils.go | 38 - vendor/github.com/huichen/sego/token.go | 50 - vendor/github.com/huichen/sego/utils.go | 93 - .../github.com/mozillazg/go-cos/CHANGELOG.md | 105 - vendor/github.com/mozillazg/go-cos/LICENSE | 21 - vendor/github.com/mozillazg/go-cos/Makefile | 21 - 
vendor/github.com/mozillazg/go-cos/README.md | 101 - vendor/github.com/mozillazg/go-cos/auth.go | 253 - vendor/github.com/mozillazg/go-cos/bucket.go | 107 - .../github.com/mozillazg/go-cos/bucket_acl.go | 62 - .../mozillazg/go-cos/bucket_cors.go | 77 - .../mozillazg/go-cos/bucket_lifecycle.go | 103 - .../mozillazg/go-cos/bucket_location.go | 30 - .../mozillazg/go-cos/bucket_part.go | 59 - .../mozillazg/go-cos/bucket_tagging.go | 75 - vendor/github.com/mozillazg/go-cos/cos.go | 378 -- vendor/github.com/mozillazg/go-cos/doc.go | 29 - vendor/github.com/mozillazg/go-cos/error.go | 41 - vendor/github.com/mozillazg/go-cos/helper.go | 85 - vendor/github.com/mozillazg/go-cos/object.go | 339 -- .../github.com/mozillazg/go-cos/object_acl.go | 63 - .../mozillazg/go-cos/object_part.go | 177 - vendor/github.com/mozillazg/go-cos/service.go | 37 - vendor/github.com/qiniu/api.v7/CHANGELOG.md | 65 - vendor/github.com/qiniu/api.v7/Makefile | 6 - vendor/github.com/qiniu/api.v7/README.md | 20 - .../github.com/qiniu/api.v7/auth/qbox/doc.go | 2 - .../qiniu/api.v7/auth/qbox/qbox_auth.go | 147 - .../github.com/qiniu/api.v7/cdn/anti_leech.go | 33 - vendor/github.com/qiniu/api.v7/cdn/api.go | 301 -- vendor/github.com/qiniu/api.v7/cdn/doc.go | 3 - vendor/github.com/qiniu/api.v7/conf/conf.go | 9 - vendor/github.com/qiniu/api.v7/conf/doc.go | 2 - vendor/github.com/qiniu/api.v7/doc.go | 20 - vendor/github.com/qiniu/api.v7/rtc/api.go | 246 - vendor/github.com/qiniu/api.v7/rtc/doc.go | 6 - vendor/github.com/qiniu/api.v7/rtc/util.go | 140 - .../qiniu/api.v7/storage/base64_upload.go | 173 - .../github.com/qiniu/api.v7/storage/bucket.go | 742 --- .../github.com/qiniu/api.v7/storage/config.go | 25 - vendor/github.com/qiniu/api.v7/storage/doc.go | 10 - .../qiniu/api.v7/storage/form_upload.go | 352 -- .../github.com/qiniu/api.v7/storage/pfop.go | 211 - .../qiniu/api.v7/storage/resume_base.go | 187 - .../qiniu/api.v7/storage/resume_upload.go | 316 -- vendor/github.com/qiniu/api.v7/storage/rpc.go | 395 -- .../github.com/qiniu/api.v7/storage/token.go | 73 - .../github.com/qiniu/api.v7/storage/util.go | 22 - .../github.com/qiniu/api.v7/storage/zone.go | 247 - vendor/github.com/qiniu/api.v7/test-env.sh | 10 - vendor/github.com/qiniu/x/bytes.v7/README.md | 4 - vendor/github.com/qiniu/x/bytes.v7/bytes.go | 177 - vendor/github.com/qiniu/x/bytes.v7/doc.go | 34 - vendor/github.com/qiniu/x/bytes.v7/replace.go | 54 - .../qiniu/x/bytes.v7/seekable/seekable.go | 63 - vendor/github.com/qiniu/x/reqid.v7/reqid.go | 52 - vendor/github.com/qiniu/x/xlog.v7/xlog.go | 211 - .../smartystreets/assertions/CONTRIBUTING.md | 12 - .../smartystreets/assertions/LICENSE.md | 23 - .../smartystreets/assertions/README.md | 575 --- .../assertions/assertions.goconvey | 3 - .../smartystreets/assertions/collections.go | 244 - .../smartystreets/assertions/doc.go | 105 - .../smartystreets/assertions/equality.go | 280 -- .../smartystreets/assertions/filter.go | 23 - .../assertions/internal/go-render/LICENSE | 27 - .../internal/go-render/render/render.go | 477 -- .../assertions/internal/oglematchers/LICENSE | 202 - .../internal/oglematchers/README.md | 58 - .../internal/oglematchers/all_of.go | 70 - .../assertions/internal/oglematchers/any.go | 32 - .../internal/oglematchers/any_of.go | 94 - .../internal/oglematchers/contains.go | 61 - .../internal/oglematchers/deep_equals.go | 88 - .../internal/oglematchers/elements_are.go | 91 - .../internal/oglematchers/equals.go | 541 --- .../assertions/internal/oglematchers/error.go | 51 - 
.../internal/oglematchers/greater_or_equal.go | 39 - .../internal/oglematchers/greater_than.go | 39 - .../internal/oglematchers/has_same_type_as.go | 37 - .../internal/oglematchers/has_substr.go | 46 - .../internal/oglematchers/identical_to.go | 134 - .../internal/oglematchers/less_or_equal.go | 41 - .../internal/oglematchers/less_than.go | 152 - .../internal/oglematchers/matcher.go | 86 - .../internal/oglematchers/matches_regexp.go | 69 - .../internal/oglematchers/new_matcher.go | 43 - .../assertions/internal/oglematchers/not.go | 53 - .../internal/oglematchers/panics.go | 74 - .../internal/oglematchers/pointee.go | 65 - .../oglematchers/transform_description.go | 36 - .../smartystreets/assertions/messages.go | 93 - .../smartystreets/assertions/panic.go | 115 - .../smartystreets/assertions/quantity.go | 141 - .../smartystreets/assertions/serializer.go | 69 - .../smartystreets/assertions/strings.go | 227 - .../smartystreets/assertions/time.go | 202 - .../smartystreets/assertions/type.go | 112 - .../smartystreets/goconvey/LICENSE.md | 23 - .../goconvey/convey/assertions.go | 68 - .../smartystreets/goconvey/convey/context.go | 272 -- .../goconvey/convey/convey.goconvey | 4 - .../goconvey/convey/discovery.go | 103 - .../smartystreets/goconvey/convey/doc.go | 218 - .../goconvey/convey/gotest/utils.go | 28 - .../smartystreets/goconvey/convey/init.go | 81 - .../goconvey/convey/nilReporter.go | 15 - .../goconvey/convey/reporting/console.go | 16 - .../goconvey/convey/reporting/doc.go | 5 - .../goconvey/convey/reporting/dot.go | 40 - .../goconvey/convey/reporting/gotest.go | 33 - .../goconvey/convey/reporting/init.go | 94 - .../goconvey/convey/reporting/json.go | 88 - .../goconvey/convey/reporting/printer.go | 57 - .../goconvey/convey/reporting/problems.go | 80 - .../goconvey/convey/reporting/reporter.go | 39 - .../convey/reporting/reporting.goconvey | 2 - .../goconvey/convey/reporting/reports.go | 179 - .../goconvey/convey/reporting/statistics.go | 108 - .../goconvey/convey/reporting/story.go | 73 - vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/acme/acme.go | 921 ---- .../x/crypto/acme/autocert/autocert.go | 1127 ----- .../x/crypto/acme/autocert/cache.go | 130 - .../x/crypto/acme/autocert/listener.go | 157 - .../x/crypto/acme/autocert/renewal.go | 141 - vendor/golang.org/x/crypto/acme/http.go | 281 -- vendor/golang.org/x/crypto/acme/jws.go | 153 - vendor/golang.org/x/crypto/acme/types.go | 329 -- vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/html/atom/atom.go | 78 - vendor/golang.org/x/net/html/atom/gen.go | 648 --- vendor/golang.org/x/net/html/atom/table.go | 713 --- vendor/golang.org/x/net/html/const.go | 102 - vendor/golang.org/x/net/html/doc.go | 106 - vendor/golang.org/x/net/html/doctype.go | 156 - vendor/golang.org/x/net/html/entity.go | 2253 --------- vendor/golang.org/x/net/html/escape.go | 258 - vendor/golang.org/x/net/html/foreign.go | 226 - vendor/golang.org/x/net/html/node.go | 193 - vendor/golang.org/x/net/html/parse.go | 2094 -------- vendor/golang.org/x/net/html/render.go | 271 -- vendor/golang.org/x/net/html/token.go | 1219 ----- vendor/gopkg.in/gomail.v2/CHANGELOG.md | 20 - vendor/gopkg.in/gomail.v2/CONTRIBUTING.md | 20 - vendor/gopkg.in/gomail.v2/LICENSE | 20 - vendor/gopkg.in/gomail.v2/README.md | 92 - vendor/gopkg.in/gomail.v2/auth.go | 49 - vendor/gopkg.in/gomail.v2/doc.go | 5 - vendor/gopkg.in/gomail.v2/message.go | 322 -- vendor/gopkg.in/gomail.v2/mime.go 
| 21 - vendor/gopkg.in/gomail.v2/mime_go14.go | 25 - vendor/gopkg.in/gomail.v2/send.go | 116 - vendor/gopkg.in/gomail.v2/smtp.go | 202 - vendor/gopkg.in/gomail.v2/writeto.go | 306 -- vendor/gopkg.in/yaml.v2/LICENSE | 13 - vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 - vendor/gopkg.in/yaml.v2/README.md | 131 - vendor/gopkg.in/yaml.v2/apic.go | 742 --- vendor/gopkg.in/yaml.v2/decode.go | 683 --- vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ------- vendor/gopkg.in/yaml.v2/encode.go | 306 -- vendor/gopkg.in/yaml.v2/parserc.go | 1096 ----- vendor/gopkg.in/yaml.v2/readerc.go | 394 -- vendor/gopkg.in/yaml.v2/resolve.go | 203 - vendor/gopkg.in/yaml.v2/scannerc.go | 2710 ----------- vendor/gopkg.in/yaml.v2/sorter.go | 104 - vendor/gopkg.in/yaml.v2/writerc.go | 89 - vendor/gopkg.in/yaml.v2/yaml.go | 346 -- vendor/gopkg.in/yaml.v2/yamlh.go | 716 --- vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 - vendor/qiniupkg.com/x/bytes.v7/README.md | 4 - vendor/qiniupkg.com/x/bytes.v7/bytes.go | 177 - vendor/qiniupkg.com/x/bytes.v7/doc.go | 34 - vendor/qiniupkg.com/x/bytes.v7/replace.go | 54 - vendor/qiniupkg.com/x/log.v7/README.md | 4 - vendor/qiniupkg.com/x/log.v7/logext.go | 521 -- vendor/qiniupkg.com/x/reqid.v7/reqid.go | 52 - vendor/rsc.io/pdf/LICENSE | 27 - vendor/rsc.io/pdf/README.md | 3 - vendor/rsc.io/pdf/lex.go | 529 -- vendor/rsc.io/pdf/name.go | 4286 ----------------- vendor/rsc.io/pdf/page.go | 666 --- vendor/rsc.io/pdf/ps.go | 138 - vendor/rsc.io/pdf/read.go | 1079 ----- vendor/rsc.io/pdf/text.go | 158 - 329 files changed, 181 insertions(+), 66196 deletions(-) create mode 100644 go.mod delete mode 100644 vendor/github.com/TruthHun/gotil/cryptil/cryptil.go delete mode 100644 vendor/github.com/TruthHun/gotil/cryptil/readme.md delete mode 100644 vendor/github.com/TruthHun/gotil/util/util.go delete mode 100755 vendor/github.com/adamzy/cedar-go/LICENSE.md delete mode 100755 vendor/github.com/adamzy/cedar-go/README.md delete mode 100755 vendor/github.com/adamzy/cedar-go/api.go delete mode 100755 vendor/github.com/adamzy/cedar-go/cedar.go delete mode 100755 vendor/github.com/adamzy/cedar-go/doc.go delete mode 100755 vendor/github.com/adamzy/cedar-go/errors.go delete mode 100755 vendor/github.com/adamzy/cedar-go/io.go delete mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/CHANGELOG.md delete mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/README-CN.md delete mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/README.md delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go delete mode 100755 
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go delete mode 100755 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go delete mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/sample.go delete mode 100755 vendor/github.com/andybalholm/cascadia/LICENSE delete mode 100644 vendor/github.com/andybalholm/cascadia/README.md delete mode 100644 vendor/github.com/andybalholm/cascadia/go.mod delete mode 100644 vendor/github.com/andybalholm/cascadia/parser.go delete mode 100644 vendor/github.com/andybalholm/cascadia/selector.go delete mode 100644 vendor/github.com/baidubce/bce-sdk-go/LICENSE delete mode 100644 vendor/github.com/baidubce/bce-sdk-go/README.md delete mode 100644 vendor/github.com/baidubce/bce-sdk-go/init.go delete mode 100644 vendor/github.com/denverdino/aliyungo/LICENSE.txt delete mode 100755 vendor/github.com/denverdino/aliyungo/common/client.go delete mode 100644 vendor/github.com/denverdino/aliyungo/common/endpoint.go delete mode 100644 vendor/github.com/denverdino/aliyungo/common/endpoints.xml delete mode 100644 vendor/github.com/denverdino/aliyungo/common/regions.go delete mode 100644 vendor/github.com/denverdino/aliyungo/common/request.go delete mode 100644 vendor/github.com/denverdino/aliyungo/common/types.go delete mode 100644 vendor/github.com/denverdino/aliyungo/common/version.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/client.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/export.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/multi.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/regions.go delete mode 100644 vendor/github.com/denverdino/aliyungo/oss/signature.go delete mode 100644 vendor/github.com/denverdino/aliyungo/util/attempt.go delete mode 100644 vendor/github.com/denverdino/aliyungo/util/encoding.go delete mode 100644 vendor/github.com/denverdino/aliyungo/util/iso6801.go delete mode 100644 vendor/github.com/denverdino/aliyungo/util/signature.go delete mode 100644 vendor/github.com/denverdino/aliyungo/util/util.go delete mode 100755 vendor/github.com/disintegration/imaging/LICENSE delete mode 100755 vendor/github.com/disintegration/imaging/README.md delete mode 100755 vendor/github.com/disintegration/imaging/adjust.go delete mode 100755 vendor/github.com/disintegration/imaging/effects.go delete mode 100755 vendor/github.com/disintegration/imaging/helpers.go delete mode 100755 vendor/github.com/disintegration/imaging/histogram.go delete mode 100755 vendor/github.com/disintegration/imaging/resize.go delete mode 100755 vendor/github.com/disintegration/imaging/tools.go delete mode 100755 vendor/github.com/disintegration/imaging/transform.go delete mode 100755 vendor/github.com/disintegration/imaging/utils.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/AUTHORS delete mode 100755 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md delete mode 100755 vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md delete mode 100755 vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md delete mode 100755 vendor/github.com/go-sql-driver/mysql/LICENSE delete mode 100755 vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md delete mode 100755 vendor/github.com/go-sql-driver/mysql/README.md delete mode 100755 vendor/github.com/go-sql-driver/mysql/appengine.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/buffer.go 
delete mode 100755 vendor/github.com/go-sql-driver/mysql/collations.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/connection.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/const.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/driver.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/dsn.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/errors.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/infile.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/packets.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/result.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/rows.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/statement.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/transaction.go delete mode 100755 vendor/github.com/go-sql-driver/mysql/utils.go delete mode 100755 vendor/github.com/huichen/sego/README.md delete mode 100755 vendor/github.com/huichen/sego/dictionary.go delete mode 100755 vendor/github.com/huichen/sego/license.txt delete mode 100755 vendor/github.com/huichen/sego/segment.go delete mode 100755 vendor/github.com/huichen/sego/segmenter.go delete mode 100755 vendor/github.com/huichen/sego/test_utils.go delete mode 100755 vendor/github.com/huichen/sego/token.go delete mode 100755 vendor/github.com/huichen/sego/utils.go delete mode 100644 vendor/github.com/mozillazg/go-cos/CHANGELOG.md delete mode 100644 vendor/github.com/mozillazg/go-cos/LICENSE delete mode 100644 vendor/github.com/mozillazg/go-cos/Makefile delete mode 100644 vendor/github.com/mozillazg/go-cos/README.md delete mode 100644 vendor/github.com/mozillazg/go-cos/auth.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_acl.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_cors.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_lifecycle.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_location.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_part.go delete mode 100644 vendor/github.com/mozillazg/go-cos/bucket_tagging.go delete mode 100644 vendor/github.com/mozillazg/go-cos/cos.go delete mode 100644 vendor/github.com/mozillazg/go-cos/doc.go delete mode 100644 vendor/github.com/mozillazg/go-cos/error.go delete mode 100644 vendor/github.com/mozillazg/go-cos/helper.go delete mode 100644 vendor/github.com/mozillazg/go-cos/object.go delete mode 100644 vendor/github.com/mozillazg/go-cos/object_acl.go delete mode 100644 vendor/github.com/mozillazg/go-cos/object_part.go delete mode 100644 vendor/github.com/mozillazg/go-cos/service.go delete mode 100644 vendor/github.com/qiniu/api.v7/CHANGELOG.md delete mode 100644 vendor/github.com/qiniu/api.v7/Makefile delete mode 100644 vendor/github.com/qiniu/api.v7/README.md delete mode 100644 vendor/github.com/qiniu/api.v7/auth/qbox/doc.go delete mode 100644 vendor/github.com/qiniu/api.v7/auth/qbox/qbox_auth.go delete mode 100644 vendor/github.com/qiniu/api.v7/cdn/anti_leech.go delete mode 100644 vendor/github.com/qiniu/api.v7/cdn/api.go delete mode 100644 vendor/github.com/qiniu/api.v7/cdn/doc.go delete mode 100644 vendor/github.com/qiniu/api.v7/conf/conf.go delete mode 100644 vendor/github.com/qiniu/api.v7/conf/doc.go delete mode 100644 vendor/github.com/qiniu/api.v7/doc.go delete mode 100644 vendor/github.com/qiniu/api.v7/rtc/api.go delete mode 100644 vendor/github.com/qiniu/api.v7/rtc/doc.go delete mode 100644 
vendor/github.com/qiniu/api.v7/rtc/util.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/base64_upload.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/bucket.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/config.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/doc.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/form_upload.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/pfop.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/resume_base.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/resume_upload.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/rpc.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/token.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/util.go delete mode 100644 vendor/github.com/qiniu/api.v7/storage/zone.go delete mode 100644 vendor/github.com/qiniu/api.v7/test-env.sh delete mode 100644 vendor/github.com/qiniu/x/bytes.v7/README.md delete mode 100644 vendor/github.com/qiniu/x/bytes.v7/bytes.go delete mode 100644 vendor/github.com/qiniu/x/bytes.v7/doc.go delete mode 100644 vendor/github.com/qiniu/x/bytes.v7/replace.go delete mode 100644 vendor/github.com/qiniu/x/bytes.v7/seekable/seekable.go delete mode 100644 vendor/github.com/qiniu/x/reqid.v7/reqid.go delete mode 100644 vendor/github.com/qiniu/x/xlog.v7/xlog.go delete mode 100755 vendor/github.com/smartystreets/assertions/CONTRIBUTING.md delete mode 100755 vendor/github.com/smartystreets/assertions/LICENSE.md delete mode 100755 vendor/github.com/smartystreets/assertions/README.md delete mode 100755 vendor/github.com/smartystreets/assertions/assertions.goconvey delete mode 100755 vendor/github.com/smartystreets/assertions/collections.go delete mode 100755 vendor/github.com/smartystreets/assertions/doc.go delete mode 100755 vendor/github.com/smartystreets/assertions/equality.go delete mode 100755 vendor/github.com/smartystreets/assertions/filter.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE delete mode 100755 vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/any.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/error.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go delete mode 100755 
vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/panics.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee.go delete mode 100755 vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go delete mode 100755 vendor/github.com/smartystreets/assertions/messages.go delete mode 100755 vendor/github.com/smartystreets/assertions/panic.go delete mode 100755 vendor/github.com/smartystreets/assertions/quantity.go delete mode 100755 vendor/github.com/smartystreets/assertions/serializer.go delete mode 100755 vendor/github.com/smartystreets/assertions/strings.go delete mode 100755 vendor/github.com/smartystreets/assertions/time.go delete mode 100755 vendor/github.com/smartystreets/assertions/type.go delete mode 100755 vendor/github.com/smartystreets/goconvey/LICENSE.md delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/assertions.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/context.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/convey.goconvey delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/discovery.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/doc.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/init.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/nilReporter.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/console.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/init.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/json.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go delete mode 100755 vendor/github.com/smartystreets/goconvey/convey/reporting/story.go delete mode 100644 vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/golang.org/x/crypto/acme/acme.go delete mode 100644 
vendor/golang.org/x/crypto/acme/autocert/autocert.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go delete mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go delete mode 100644 vendor/golang.org/x/crypto/acme/http.go delete mode 100644 vendor/golang.org/x/crypto/acme/jws.go delete mode 100644 vendor/golang.org/x/crypto/acme/types.go delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go delete mode 100644 vendor/golang.org/x/net/html/atom/table.go delete mode 100644 vendor/golang.org/x/net/html/const.go delete mode 100644 vendor/golang.org/x/net/html/doc.go delete mode 100644 vendor/golang.org/x/net/html/doctype.go delete mode 100644 vendor/golang.org/x/net/html/entity.go delete mode 100644 vendor/golang.org/x/net/html/escape.go delete mode 100644 vendor/golang.org/x/net/html/foreign.go delete mode 100644 vendor/golang.org/x/net/html/node.go delete mode 100644 vendor/golang.org/x/net/html/parse.go delete mode 100644 vendor/golang.org/x/net/html/render.go delete mode 100644 vendor/golang.org/x/net/html/token.go delete mode 100644 vendor/gopkg.in/gomail.v2/CHANGELOG.md delete mode 100644 vendor/gopkg.in/gomail.v2/CONTRIBUTING.md delete mode 100644 vendor/gopkg.in/gomail.v2/LICENSE delete mode 100644 vendor/gopkg.in/gomail.v2/README.md delete mode 100644 vendor/gopkg.in/gomail.v2/auth.go delete mode 100644 vendor/gopkg.in/gomail.v2/doc.go delete mode 100644 vendor/gopkg.in/gomail.v2/message.go delete mode 100644 vendor/gopkg.in/gomail.v2/mime.go delete mode 100644 vendor/gopkg.in/gomail.v2/mime_go14.go delete mode 100644 vendor/gopkg.in/gomail.v2/send.go delete mode 100644 vendor/gopkg.in/gomail.v2/smtp.go delete mode 100644 vendor/gopkg.in/gomail.v2/writeto.go delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml delete mode 100644 vendor/gopkg.in/yaml.v2/README.md delete mode 100644 vendor/gopkg.in/yaml.v2/apic.go delete mode 100644 vendor/gopkg.in/yaml.v2/decode.go delete mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v2/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v2/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go delete mode 100644 vendor/qiniupkg.com/x/bytes.v7/README.md delete mode 100644 vendor/qiniupkg.com/x/bytes.v7/bytes.go delete mode 100644 vendor/qiniupkg.com/x/bytes.v7/doc.go delete mode 100644 vendor/qiniupkg.com/x/bytes.v7/replace.go delete mode 100644 vendor/qiniupkg.com/x/log.v7/README.md delete mode 100644 vendor/qiniupkg.com/x/log.v7/logext.go delete mode 100644 vendor/qiniupkg.com/x/reqid.v7/reqid.go delete mode 100644 vendor/rsc.io/pdf/LICENSE delete mode 100644 vendor/rsc.io/pdf/README.md delete mode 100644 vendor/rsc.io/pdf/lex.go delete mode 100644 vendor/rsc.io/pdf/name.go delete mode 100644 vendor/rsc.io/pdf/page.go delete mode 100644 vendor/rsc.io/pdf/ps.go delete mode 100644 vendor/rsc.io/pdf/read.go 
delete mode 100644 vendor/rsc.io/pdf/text.go diff --git a/controllers/AdminControllers/BannerController.go b/controllers/AdminControllers/BannerController.go index 52849dd..aad5566 100644 --- a/controllers/AdminControllers/BannerController.go +++ b/controllers/AdminControllers/BannerController.go @@ -2,16 +2,14 @@ package AdminControllers import ( "fmt" - - "time" - "os" - "strings" + "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) //IT文库注册会员管理 diff --git a/controllers/AdminControllers/BaseController.go b/controllers/AdminControllers/BaseController.go index d4c52dd..0c44e03 100644 --- a/controllers/AdminControllers/BaseController.go +++ b/controllers/AdminControllers/BaseController.go @@ -1,16 +1,14 @@ package AdminControllers import ( + "fmt" "strings" - - "github.com/TruthHun/DocHub/models" - "time" - "fmt" - - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego" + + "DocHub/helper" + "DocHub/models" ) type BaseController struct { diff --git a/controllers/AdminControllers/DocController.go b/controllers/AdminControllers/DocController.go index 18045ae..4af52a8 100644 --- a/controllers/AdminControllers/DocController.go +++ b/controllers/AdminControllers/DocController.go @@ -2,15 +2,14 @@ package AdminControllers import ( "fmt" - "sort" - "strings" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) type DocController struct { diff --git a/controllers/AdminControllers/FriendController.go b/controllers/AdminControllers/FriendController.go index f98d416..3da3fac 100644 --- a/controllers/AdminControllers/FriendController.go +++ b/controllers/AdminControllers/FriendController.go @@ -3,10 +3,10 @@ package AdminControllers import ( "time" - "github.com/TruthHun/DocHub/helper" - - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type FriendController struct { diff --git a/controllers/AdminControllers/KindEditorController.go b/controllers/AdminControllers/KindEditorController.go index efbc904..46b31f8 100644 --- a/controllers/AdminControllers/KindEditorController.go +++ b/controllers/AdminControllers/KindEditorController.go @@ -5,8 +5,8 @@ import ( "os" "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" + "DocHub/helper" + "DocHub/models" ) type KindEditorController struct { diff --git a/controllers/AdminControllers/LoginController.go b/controllers/AdminControllers/LoginController.go index 83624c9..9a72448 100644 --- a/controllers/AdminControllers/LoginController.go +++ b/controllers/AdminControllers/LoginController.go @@ -2,12 +2,12 @@ package AdminControllers import ( "html/template" - "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type LoginController struct { diff --git a/controllers/AdminControllers/ReportController.go b/controllers/AdminControllers/ReportController.go index dbf9f9c..80c4fbf 100644 --- a/controllers/AdminControllers/ReportController.go +++ b/controllers/AdminControllers/ReportController.go @@ -1,6 +1,6 @@ package AdminControllers -import "github.com/TruthHun/DocHub/models" +import "DocHub/models" type ReportController struct { BaseController diff --git a/controllers/AdminControllers/ScoreController.go 
b/controllers/AdminControllers/ScoreController.go index 7f39349..b761b68 100644 --- a/controllers/AdminControllers/ScoreController.go +++ b/controllers/AdminControllers/ScoreController.go @@ -1,6 +1,6 @@ package AdminControllers -import "github.com/TruthHun/DocHub/models" +import "DocHub/models" type ScoreController struct { BaseController diff --git a/controllers/AdminControllers/SeoController.go b/controllers/AdminControllers/SeoController.go index e39ef3e..7e121c6 100644 --- a/controllers/AdminControllers/SeoController.go +++ b/controllers/AdminControllers/SeoController.go @@ -1,6 +1,6 @@ package AdminControllers -import "github.com/TruthHun/DocHub/models" +import "DocHub/models" type SeoController struct { BaseController diff --git a/controllers/AdminControllers/SingleController.go b/controllers/AdminControllers/SingleController.go index b6d6224..c97c3bf 100644 --- a/controllers/AdminControllers/SingleController.go +++ b/controllers/AdminControllers/SingleController.go @@ -4,8 +4,9 @@ import ( "net/http" "time" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/models" ) type SingleController struct { diff --git a/controllers/AdminControllers/SysController.go b/controllers/AdminControllers/SysController.go index 62e8cd9..7ef83e5 100644 --- a/controllers/AdminControllers/SysController.go +++ b/controllers/AdminControllers/SysController.go @@ -1,19 +1,17 @@ package AdminControllers import ( + "io/ioutil" "net/http" + "os" + "path/filepath" "strings" - - "io/ioutil" "time" - "path/filepath" - - "os" - - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type SysController struct { diff --git a/controllers/AdminControllers/UserController.go b/controllers/AdminControllers/UserController.go index 85a50a7..00599d1 100644 --- a/controllers/AdminControllers/UserController.go +++ b/controllers/AdminControllers/UserController.go @@ -2,13 +2,11 @@ package AdminControllers import ( "fmt" - - "github.com/TruthHun/DocHub/helper" - "strings" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) //IT文库注册会员管理 diff --git a/controllers/HomeControllers/BaseController.go b/controllers/HomeControllers/BaseController.go index ed96112..648c496 100644 --- a/controllers/HomeControllers/BaseController.go +++ b/controllers/HomeControllers/BaseController.go @@ -1,16 +1,16 @@ package HomeControllers import ( + "fmt" "html/template" "strings" - - "fmt" "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type Output struct { diff --git a/controllers/HomeControllers/CollectController.go b/controllers/HomeControllers/CollectController.go index ca98f1f..829dd06 100644 --- a/controllers/HomeControllers/CollectController.go +++ b/controllers/HomeControllers/CollectController.go @@ -3,9 +3,10 @@ package HomeControllers import ( "fmt" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type CollectController struct { diff --git a/controllers/HomeControllers/IndexController.go b/controllers/HomeControllers/IndexController.go index bc16276..1ce0bb2 100644 --- a/controllers/HomeControllers/IndexController.go +++ b/controllers/HomeControllers/IndexController.go @@ -2,12 +2,12 @@ package 
HomeControllers import ( "fmt" - "strings" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type IndexController struct { diff --git a/controllers/HomeControllers/InstallController.go b/controllers/HomeControllers/InstallController.go index 93ae69d..bdc4bc5 100644 --- a/controllers/HomeControllers/InstallController.go +++ b/controllers/HomeControllers/InstallController.go @@ -3,9 +3,10 @@ package HomeControllers import ( "strings" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego" + + "DocHub/helper" + "DocHub/models" ) type InstallController struct { diff --git a/controllers/HomeControllers/ListController.go b/controllers/HomeControllers/ListController.go index 1cc4a45..364f797 100644 --- a/controllers/HomeControllers/ListController.go +++ b/controllers/HomeControllers/ListController.go @@ -1,16 +1,15 @@ package HomeControllers import ( - "strings" - "fmt" - + "strings" "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) type ListController struct { diff --git a/controllers/HomeControllers/ReportController.go b/controllers/HomeControllers/ReportController.go index 7c9b0cf..b173b9b 100644 --- a/controllers/HomeControllers/ReportController.go +++ b/controllers/HomeControllers/ReportController.go @@ -3,9 +3,10 @@ package HomeControllers import ( "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type ReportController struct { diff --git a/controllers/HomeControllers/SearchController.go b/controllers/HomeControllers/SearchController.go index d9037e6..ec1812e 100644 --- a/controllers/HomeControllers/SearchController.go +++ b/controllers/HomeControllers/SearchController.go @@ -2,13 +2,13 @@ package HomeControllers import ( "strings" - "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) type SearchController struct { diff --git a/controllers/HomeControllers/StaticController.go b/controllers/HomeControllers/StaticController.go index 5c7fa11..51cb5ec 100644 --- a/controllers/HomeControllers/StaticController.go +++ b/controllers/HomeControllers/StaticController.go @@ -5,8 +5,9 @@ import ( "path/filepath" "strings" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego" + + "DocHub/helper" ) type StaticController struct { diff --git a/controllers/HomeControllers/UploadController.go b/controllers/HomeControllers/UploadController.go index 2aa5fde..c23d8ac 100644 --- a/controllers/HomeControllers/UploadController.go +++ b/controllers/HomeControllers/UploadController.go @@ -5,13 +5,13 @@ import ( "os" "path/filepath" "strings" - "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) type UploadController struct { diff --git a/controllers/HomeControllers/UserController.go b/controllers/HomeControllers/UserController.go index 9da3507..511b543 100644 --- a/controllers/HomeControllers/UserController.go +++ b/controllers/HomeControllers/UserController.go @@ 
-2,21 +2,18 @@ package HomeControllers import ( "fmt" + "os" "path/filepath" - - "github.com/astaxie/beego" - "strings" - "time" - "os" - - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/helper/conv" - "github.com/TruthHun/DocHub/models" + "github.com/astaxie/beego" "github.com/astaxie/beego/orm" "github.com/astaxie/beego/validation" + + "DocHub/helper" + "DocHub/helper/conv" + "DocHub/models" ) type UserController struct { diff --git a/controllers/HomeControllers/ViewController.go b/controllers/HomeControllers/ViewController.go index c788e4e..9609518 100644 --- a/controllers/HomeControllers/ViewController.go +++ b/controllers/HomeControllers/ViewController.go @@ -2,15 +2,14 @@ package HomeControllers import ( "fmt" - "strings" - "time" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" "github.com/astaxie/beego" "github.com/astaxie/beego/orm" + + "DocHub/helper" + "DocHub/models" ) type ViewController struct { diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..45c34d4 --- /dev/null +++ b/go.mod @@ -0,0 +1,31 @@ +module DocHub + +go 1.13 + +require ( + github.com/PuerkitoBio/goquery v1.5.0 + github.com/TruthHun/CloudStore v0.0.0-20190320141136-5df4f3bcd23a + github.com/TruthHun/gotil v0.0.0-20191003091818-17b80aad8a45 + github.com/adamzy/cedar-go v0.0.0-20170805034717-80a9c64b256d // indirect + github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible // indirect + github.com/astaxie/beego v1.12.0 + github.com/baidubce/bce-sdk-go v0.9.5 // indirect + github.com/disintegration/imaging v1.6.2 + github.com/go-ini/ini v1.51.0 // indirect + github.com/go-sql-driver/mysql v1.4.1 + github.com/huichen/sego v0.0.0-20180617034105-3f3c8a8cfacc + github.com/minio/minio-go v6.0.14+incompatible // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/qiniu/api.v7 v7.2.5+incompatible // indirect + github.com/qiniu/x v7.0.8+incompatible // indirect + github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 // indirect + github.com/tdewolff/minify v2.3.6+incompatible + github.com/tdewolff/parse v2.3.4+incompatible // indirect + github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20191108095731-8ca4b370cde4 // indirect + github.com/upyun/go-sdk v2.1.0+incompatible // indirect + golang.org/x/sys v0.0.0-20191119195528-f068ffe820e4 // indirect + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df + qiniupkg.com/x v7.0.8+incompatible // indirect + rsc.io/pdf v0.1.1 +) diff --git a/helper/config.go b/helper/config.go index 287a536..a41a774 100644 --- a/helper/config.go +++ b/helper/config.go @@ -2,14 +2,11 @@ package helper import ( "fmt" + "io/ioutil" + "os" "strconv" - "time" - "os" - - "io/ioutil" - "github.com/astaxie/beego" ) diff --git a/helper/convert.go b/helper/convert.go index a31620b..1432e8c 100644 --- a/helper/convert.go +++ b/helper/convert.go @@ -10,10 +10,9 @@ import ( "strings" "time" + "github.com/astaxie/beego" "github.com/tdewolff/minify" "github.com/tdewolff/minify/svg" - - "github.com/astaxie/beego" ) // 将 PDF 转成 SVG diff --git a/helper/helper.go b/helper/helper.go index 6dc18ff..2feaff2 100644 --- a/helper/helper.go +++ b/helper/helper.go @@ -3,46 +3,34 @@ package helper import ( "bytes" "compress/gzip" + "crypto/md5" + "crypto/sha1" "encoding/hex" + "errors" "fmt" "html/template" "image" - "math/rand" - - "crypto/md5" - - "time" - - "strconv" - - "os" - "io" - "strings" - - "crypto/sha1" - - "net/url" - - "os/exec" - "io/ioutil" - - "regexp" - - 
"errors" - + "math/rand" "net/http" - + "net/url" + "os" + "os/exec" "path/filepath" + "regexp" + "strconv" + "strings" + "time" "github.com/PuerkitoBio/goquery" - "github.com/TruthHun/DocHub/helper/crawl" "github.com/astaxie/beego" "github.com/astaxie/beego/cache" "github.com/disintegration/imaging" "github.com/huichen/sego" "rsc.io/pdf" + + "DocHub/helper/crawl" ) func init() { diff --git a/helper/logs.go b/helper/logs.go index d645273..38e06dd 100644 --- a/helper/logs.go +++ b/helper/logs.go @@ -1,9 +1,8 @@ package helper import ( - "os" - "fmt" + "os" "github.com/astaxie/beego" "github.com/astaxie/beego/logs" diff --git a/main.go b/main.go index aba71ca..f725d7e 100644 --- a/main.go +++ b/main.go @@ -3,11 +3,16 @@ package main import ( "fmt" - "github.com/TruthHun/DocHub/controllers/HomeControllers" - "github.com/TruthHun/DocHub/helper" - "github.com/TruthHun/DocHub/models" - _ "github.com/TruthHun/DocHub/routers" + _ "DocHub/routers" + "github.com/astaxie/beego" + "github.com/astaxie/beego/logs" + + + "DocHub/controllers/HomeControllers" + "DocHub/helper" + "DocHub/models" + ) //初始化函数 @@ -27,7 +32,7 @@ func init() { //初始化分词器 go func() { helper.Segmenter.LoadDictionary("./dictionary/dictionary.txt") - beego.Info("==程序启动完毕==") + logs.Info("==程序启动完毕==") }() beego.AddFuncMap("TimestampFormat", helper.TimestampFormat) diff --git a/models/AdminModel.go b/models/AdminModel.go index 4d0cf85..7d3ed7a 100644 --- a/models/AdminModel.go +++ b/models/AdminModel.go @@ -1,8 +1,9 @@ package models import ( - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //管理员数据表 diff --git a/models/BannerModel.go b/models/BannerModel.go index 6a715f8..8705905 100644 --- a/models/BannerModel.go +++ b/models/BannerModel.go @@ -1,8 +1,9 @@ package models import ( - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //横幅 diff --git a/models/CategoryModel.go b/models/CategoryModel.go index b1bf6c6..72b5b92 100644 --- a/models/CategoryModel.go +++ b/models/CategoryModel.go @@ -3,8 +3,9 @@ package models import ( "errors" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //文档分类 diff --git a/models/CloudStoreModel.go b/models/CloudStoreModel.go index b2283e6..ef82624 100644 --- a/models/CloudStoreModel.go +++ b/models/CloudStoreModel.go @@ -8,9 +8,9 @@ import ( "strings" "github.com/PuerkitoBio/goquery" - CloudStore2 "github.com/TruthHun/CloudStore" - "github.com/TruthHun/DocHub/helper" + + "DocHub/helper" ) type CloudStore struct { diff --git a/models/CollectModel.go b/models/CollectModel.go index 89a5c15..f6b8f0b 100644 --- a/models/CollectModel.go +++ b/models/CollectModel.go @@ -3,8 +3,9 @@ package models import ( "errors" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //会员文档收藏的文件夹 diff --git a/models/ConfigModel.go b/models/ConfigModel.go index b42b64e..7fc5fad 100644 --- a/models/ConfigModel.go +++ b/models/ConfigModel.go @@ -12,10 +12,9 @@ import ( gomail "gopkg.in/gomail.v2" "github.com/astaxie/beego" - - "github.com/TruthHun/DocHub/helper" - "github.com/astaxie/beego/orm" + + "DocHub/helper" ) const ( diff --git a/models/DocumentModel.go b/models/DocumentModel.go index 3779c6a..164eee4 100644 --- a/models/DocumentModel.go +++ b/models/DocumentModel.go @@ -1,16 +1,14 @@ package models import ( - "fmt" - "strings" - - "github.com/TruthHun/DocHub/helper" - "errors" - + "fmt" "strconv" + "strings" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) 
//文档资源状态,1正常,0文档未转换成功,-1删除,同时把id录入文档回收站id,-2表示删除了文档文件,但是数据库记录还保留。同时后台也看不到该记录 diff --git a/models/ElasticSearchModel.go b/models/ElasticSearchModel.go index 1d503e1..443d8b8 100644 --- a/models/ElasticSearchModel.go +++ b/models/ElasticSearchModel.go @@ -1,23 +1,21 @@ package models import ( + "encoding/json" "errors" + "fmt" "io/ioutil" - "strings" - "time" - "net/http" - - "encoding/json" "strconv" + "strings" + "time" - "fmt" - - "github.com/TruthHun/DocHub/helper" "github.com/TruthHun/gotil/util" "github.com/astaxie/beego" "github.com/astaxie/beego/httplib" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //全文搜索客户端 diff --git a/models/Install.go b/models/Install.go index 3b51a82..4bc7ee8 100644 --- a/models/Install.go +++ b/models/Install.go @@ -4,9 +4,10 @@ import ( "fmt" "time" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) func install() { diff --git a/models/Models.go b/models/Models.go index ee95fb4..4be3a62 100644 --- a/models/Models.go +++ b/models/Models.go @@ -2,29 +2,22 @@ package models import ( + "database/sql" + "errors" "fmt" "net/url" - - "github.com/TruthHun/DocHub/helper" - + "os" "reflect" - + "strconv" "strings" + "time" - "errors" + _ "github.com/go-sql-driver/mysql" "github.com/astaxie/beego" "github.com/astaxie/beego/orm" - "os" - - "strconv" - - "time" - - "database/sql" - - _ "github.com/go-sql-driver/mysql" + "DocHub/helper" ) //注意一下,varchar最多能存储65535个字符 diff --git a/models/RecycleModel.go b/models/RecycleModel.go index 9e7b639..08d0700 100644 --- a/models/RecycleModel.go +++ b/models/RecycleModel.go @@ -5,9 +5,9 @@ import ( "strings" "time" - "github.com/TruthHun/DocHub/helper" - "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //文档回收站 diff --git a/models/RemarkModel.go b/models/RemarkModel.go index 2559d4d..1d7913f 100644 --- a/models/RemarkModel.go +++ b/models/RemarkModel.go @@ -3,11 +3,11 @@ package models import ( "fmt" "io/ioutil" - "time" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //文档备注,用于侵权文档等的部分内容的预览展示,并在文档预览页面挂上跳转购买正版的导购链接;同时对于一些开源书籍,也可以一面提供站内文档的下载,一面引导用户购买正版。 diff --git a/models/SeoModel.go b/models/SeoModel.go index 7dac6e8..6607e21 100644 --- a/models/SeoModel.go +++ b/models/SeoModel.go @@ -1,19 +1,17 @@ package models import ( + "fmt" + "os" "path/filepath" + "strconv" "strings" "time" - "os" - - "strconv" - - "fmt" - - "github.com/TruthHun/DocHub/helper" "github.com/TruthHun/gotil/sitemap" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //SEO配置表 diff --git a/models/UserModel.go b/models/UserModel.go index 798e205..b07d98e 100644 --- a/models/UserModel.go +++ b/models/UserModel.go @@ -5,11 +5,11 @@ import ( "fmt" "reflect" "strings" - "time" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) //用户表 diff --git a/models/Utils.go b/models/Utils.go index 30ea825..23c12c6 100644 --- a/models/Utils.go +++ b/models/Utils.go @@ -11,9 +11,9 @@ import ( "time" "github.com/astaxie/beego" - - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) // 文档处理 diff --git a/models/WordModel.go b/models/WordModel.go index 5dcc571..6b86e53 100644 --- a/models/WordModel.go +++ b/models/WordModel.go @@ -4,8 +4,9 @@ import ( "fmt" "strings" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego/orm" + + "DocHub/helper" ) // ============================ // diff --git a/routers/router.go b/routers/router.go index d44c9be..88fe208 100644 --- a/routers/router.go 
+++ b/routers/router.go @@ -1,10 +1,10 @@ package routers import ( - "github.com/TruthHun/DocHub/controllers/AdminControllers" + "DocHub/controllers/AdminControllers" + "DocHub/controllers/HomeControllers" + "DocHub/helper" - "github.com/TruthHun/DocHub/controllers/HomeControllers" - "github.com/TruthHun/DocHub/helper" "github.com/astaxie/beego" "github.com/astaxie/beego/context" ) diff --git a/vendor/github.com/TruthHun/gotil/cryptil/cryptil.go b/vendor/github.com/TruthHun/gotil/cryptil/cryptil.go deleted file mode 100644 index 8e6fe84..0000000 --- a/vendor/github.com/TruthHun/gotil/cryptil/cryptil.go +++ /dev/null @@ -1,70 +0,0 @@ -package cryptil - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "fmt" - "strconv" - "strings" - "time" -) - -//对称加密与解密之加密【从Beego中提取出来的】 -//@param value 需要加密的字符串 -//@param secret 加密密钥 -//@return encrypt 返回的加密后的字符串 -func Encrypt(value, secret string) (encrypt string) { - vs := base64.URLEncoding.EncodeToString([]byte(value)) - timestamp := strconv.FormatInt(time.Now().UnixNano(), 10) - h := hmac.New(sha1.New, []byte(secret)) - fmt.Fprintf(h, "%s%s", vs, timestamp) - sig := fmt.Sprintf("%02x", h.Sum(nil)) - return strings.Join([]string{vs, timestamp, sig}, ".") -} - -//对称加密与解密之解密【从Beego中提取出来的】 -//@param value 需要解密的字符串 -//@param secret 密钥 -//@return decrypt 返回解密后的字符串 -func Decrypt(value, secret string) (decrypt string) { - parts := strings.SplitN(value, ".", 3) - if len(parts) != 3 { - return "" - } - vs := parts[0] - timestamp := parts[1] - sig := parts[2] - h := hmac.New(sha1.New, []byte(secret)) - fmt.Fprintf(h, "%s%s", vs, timestamp) - if fmt.Sprintf("%02x", h.Sum(nil)) != sig { - return "" - } - res, _ := base64.URLEncoding.DecodeString(vs) - return string(res) -} - -//MD5加密 -//@param str 需要加密的字符串 -//@param salt 盐值 -//@return CryptStr 加密后返回的字符串 -func Md5Crypt(str string, salt ...interface{}) (CryptStr string) { - if l := len(salt); l > 0 { - slice := make([]string, l+1) - str = fmt.Sprintf(str+strings.Join(slice, "%v"), salt...) - } - return fmt.Sprintf("%x", md5.Sum([]byte(str))) -} - -//SHA1加密 -//@param str 需要加密的字符串 -//@param salt 盐值 -//@return CryptStr 加密后返回的字符串 -func Sha1Crypt(str string, salt ...interface{}) (CryptStr string) { - if l := len(salt); l > 0 { - slice := make([]string, l+1) - str = fmt.Sprintf(str+strings.Join(slice, "%v"), salt...) 
- } - return fmt.Sprintf("%x", sha1.Sum([]byte(str))) -} diff --git a/vendor/github.com/TruthHun/gotil/cryptil/readme.md b/vendor/github.com/TruthHun/gotil/cryptil/readme.md deleted file mode 100644 index 02a589f..0000000 --- a/vendor/github.com/TruthHun/gotil/cryptil/readme.md +++ /dev/null @@ -1 +0,0 @@ -加密相关工具 \ No newline at end of file diff --git a/vendor/github.com/TruthHun/gotil/util/util.go b/vendor/github.com/TruthHun/gotil/util/util.go deleted file mode 100644 index 5c353c7..0000000 --- a/vendor/github.com/TruthHun/gotil/util/util.go +++ /dev/null @@ -1,222 +0,0 @@ -package util - -import ( - "encoding/json" - "fmt" - "net/http" - "os" - "strconv" - "strings" - "time" - - "crypto/tls" - - "path/filepath" - - "github.com/TruthHun/gotil/cryptil" - "github.com/astaxie/beego/httplib" -) - -//构造request请求。[如果需要gzip压缩,请自行在headers参数配置,但是处理响应体的时候,记得使用gzip解压缩] -//@param method 请求方法:get、post、put、delete、head -//@param url 请求链接 -//@param referrer 如果有referrer,则配置该选项的header.注意:这里可以置空,然后在headers参数中配置 -//@param cookie 如果有cookie,则配置该选项的header.注意:这里可以置空,然后在headers参数中配置 -//@param os 操作系统,用于配置UA,参数值:windows、linux、Android、ios、mac。默认mac下的谷歌浏览器UA -//@param iscn 是否是中文请求,用于访问多语言的站点 -//@param isjson 是否请求的是json数据 -//@param headers 更多请求头配置项 -//@return 返回http请求 -func BuildRequest(method, url, referrer, cookie, os string, iscn, isjson bool, headers ...map[string]string) *httplib.BeegoHTTPRequest { - var req *httplib.BeegoHTTPRequest - switch strings.ToLower(method) { - case "get": - req = httplib.Get(url) - case "post": - req = httplib.Post(url) - case "put": - req = httplib.Put(url) - case "delete": - req = httplib.Delete(url) - case "head": - req = httplib.Head(url) - default: - req = httplib.Get(url) - } - - //https请求处理 - if strings.HasPrefix(strings.ToLower(url), "https") { - req.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) - } - - //设置referrer - if len(referrer) > 0 { - req.Header("Referrer", referrer) - } else { //设置默认referer - if slice := strings.Split(url, "://"); len(slice) > 1 { - req.Header("Referrer", slice[0]+"://"+strings.Split(slice[1], "/")[0]) - } - } - - //设置cookie - if len(cookie) > 0 { - req.Header("Cookie", cookie) - } - - //设置host[如有需求,自行在headers中添加] - //host_slice := strings.Split(url, "://") - //if len(host_slice) > 1 { - // host := strings.Split(host_slice[1], "/")[0] - // req.SetHost(host) - //} - - //压缩[如有需求,自行在headers中添加] - //req.Header("Accept-Encoding", "gzip, deflate, br") - - //中文 - if iscn { - req.Header("Accept-Language", "zh-CN,zh;q=0.8,en;q=0.6") - } else { - req.Header("Accept-Language", "en-US,en;q=0.8,zh;q=0.6") - } - //是否是json采集 - if isjson { - req.Header("Accept", "application/json") - req.Header("X-Request", "JSON") - req.Header("X-Requested-With", "XMLHttpRequest") - } else { - req.Header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8") - } - - //系统设置 - switch strings.ToLower(os) { - case "windows": - req.Header("User-Agent", "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36") - case "linux": - req.Header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15") - case "mac": - req.Header("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36") - case "android": - req.Header("User-Agent", "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; MB200 Build/GRJ22; 
CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1") - case "ios": - req.Header("User-Agent", "Mozilla/5.0(iPhone; CPU iPhone OS 9_3_3 like Mac OS X)AppleWebkit/601.1.46(KHTML,like Gecko)Mobile/13G3") - default: //mac - req.Header("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36") - } - - //设置headers - if len(headers) > 0 { - for _, header := range headers { - for k, v := range header { - req.Header(k, v) - } - } - } - return req -} - -//将interface数据转json -//@param itf 将数据转成json -//@return 返回json字符串 -func InterfaceToJson(itf interface{}) string { - b, _ := json.Marshal(&itf) - return string(b) -} - -//将interface数据转成int64[如果需要转成int,直接int(int64_number)即可] -//@param itf 需要转成整型的参数 -//@param num 转换结果 -//@param err 错误 -func InterfaceToInt64(itf interface{}) (num int64, err error) { - return strconv.ParseInt(fmt.Sprintf("%v", itf), 10, 64) -} - -//将interface数据转成float64[如果需要转成float32,直接float32(float64_number)即可] -//@param itf 需要转成整型的参数 -//@param num 转化结果 -//@param err 错误 -func InterfaceToFloat64(itf interface{}) (num float64, err error) { - return strconv.ParseFloat(fmt.Sprintf("%v", itf), 64) -} - -//下载图片文件 -//@param filelink 需要采集的文件链接 -//@param savefolder 采集的文件存放路径 -//@param timeout 超时时间,默认30秒 -//@return filepath 采集下载返回的文件 -//@return err 错误。当不为nil的时候,文件不会存在 -func CrawlFile(filelink string, savefolder string, timeout ...int) (file string, err error) { - var ( - resp *http.Response //响应 - ContentTypes = map[string]string{ - //以下是图片 - "image/png": ".png", - "image/jpeg": ".jpg", - "image/jpg": ".jpg", - "image/gif": ".gif", - "image/x-icon": ".ico", - "image/tiff": ".tif", - "image/webp": ".webp", - "application/x-bmp": ".bmp", - "application/x-jpg": ".jpg", - //文档文件 - "application/pdf": ".pdf", - "application/msword": ".doc", - "application/vnd.ms-powerpoint": ".ppt", - "application/vnd.ms-excel": ".xls", - //多媒体文件 - "application/xml": ".xml", - "application/json": ".json", - "application/javascript": ".js", - "application/ogg": ".ogg", - "application/xhtml+xml": ".xhtml", - "application/zip": ".zip", - "application/x-ico": ".ico", - "text/html": ".html", - "audio/mp3": ".mp3", - "audio/mp4": ".mp4", - "audio/wav": ".wav ", - "audio/x-ms-wax": ".wax", - "video/x-ms-asf": ".asf", - "video/avi": ".avi", - "video/x-sgi-movie": ".movie", - "video/mpeg4": ".mp4", - "video/x-mpg": ".mpa", - "video/x-mpeg": ".mpe", - "video/mpg": ".mpg", - "video/x-ms-wm": ".wm", - "video/x-ms-wmv": ".wmv", - "audio/x-ms-wma": ".wma", - } - ) - to := 30 //timeout,默认30秒 - if len(timeout) > 0 { - to = timeout[0] - } - - duration := time.Duration(to) - - req := BuildRequest("get", filelink, "", "", "mac", true, false) - if strings.HasPrefix(filelink, "https") { - req.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) - } - req.SetTimeout(duration/2*time.Second, duration*time.Second) - if resp, err = req.DoRequest(); err == nil && resp.StatusCode >= 200 && resp.StatusCode < 300 { - os.MkdirAll(savefolder, os.ModePerm) - ct := strings.ToLower(resp.Header.Get("content-type")) - slice := strings.Split(resp.Header.Get("Content-Disposition"), "=") - filename := slice[len(slice)-1] - if len(strings.TrimSpace(filename)) == 0 { - if ext, ok := ContentTypes[ct]; ok { - filename = cryptil.Md5Crypt(filelink) + ext - } else { - if iext := filepath.Ext(strings.Split(filelink, "?")[0]); len(iext) > 0 { - filename = cryptil.Md5Crypt(filelink) + iext - } - } - } - file = strings.TrimRight(savefolder, "/") + "/" + 
filename - err = req.ToFile(file) - } - return -} diff --git a/vendor/github.com/adamzy/cedar-go/LICENSE.md b/vendor/github.com/adamzy/cedar-go/LICENSE.md deleted file mode 100755 index aa4ea03..0000000 --- a/vendor/github.com/adamzy/cedar-go/LICENSE.md +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. 
The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. 
Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. 
If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - {description} - Copyright (C) {year} {fullname} - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. 
- -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - {signature of Ty Coon}, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/vendor/github.com/adamzy/cedar-go/README.md b/vendor/github.com/adamzy/cedar-go/README.md deleted file mode 100755 index 38eb9b4..0000000 --- a/vendor/github.com/adamzy/cedar-go/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# cedar-go [![GoDoc](https://godoc.org/github.com/adamzy/cedar-go?status.svg)](https://godoc.org/github.com/adamzy/cedar-go) - -Package `cedar-go` implementes double-array trie. - -It is a [Golang](https://golang.org/) port of [cedar](http://www.tkl.iis.u-tokyo.ac.jp/~ynaga/cedar) which is written in C++ by Naoki Yoshinaga. `cedar-go` currently implements the `reduced` verion of cedar. -This package is not thread safe if there is one goroutine doing insertions or deletions. - -## Install -``` -go get github.com/adamzy/cedar-go -``` - -## Usage -```go -package main - -import ( - "fmt" - - "github.com/adamzy/cedar-go" -) - -func main() { - // create a new cedar trie. - trie := cedar.New() - - tools - printIdKeyValue := func(id int) { - // the key of node `id`. - key, _ := trie.Key(id) - // the value of node `id`. - value, _ := trie.Value(id) - fmt.Printf("%d\t%s:%v\n", id, key, value) - } - - // Insert key-value pairs. - // The order of insertion is not important. - trie.Insert([]byte("How many"), 0) - trie.Insert([]byte("How many loved"), 1) - trie.Insert([]byte("How many loved your moments"), 2) - trie.Insert([]byte("How many loved your moments of glad grace"), 3) - trie.Insert([]byte("姑苏"), 4) - trie.Insert([]byte("姑苏城外"), 5) - trie.Insert([]byte("姑苏城外寒山寺"), 6) - - // Get the associated value of a key directly. 
- value, _ := trie.Get([]byte("How many loved your moments of glad grace")) - fmt.Println(value) - - // Or, jump to the node first, - id, _ := trie.Jump([]byte("How many loved your moments"), 0) - // then get the key and the value - printIdKeyValue(id) - - fmt.Println("\nPrefixMatch\nid\tkey:value") - for _, id := range trie.PrefixMatch([]byte("How many loved your moments of glad grace"), 0) { - printIdKeyValue(id) - } - - fmt.Println("\nPrefixPredict\nid\tkey:value") - for _, id := range trie.PrefixPredict([]byte("姑苏"), 0) { - printIdKeyValue(id) - } -} -``` -will produce -``` -3 -281 How many loved your moments:2 - -PrefixMatch -id key:value -262 How many:0 -268 How many loved:1 -281 How many loved your moments:2 -296 How many loved your moments of glad grace:3 - -PrefixPredict -id key:value -303 姑苏:4 -309 姑苏城外:5 -318 姑苏城外寒山寺:6 -``` diff --git a/vendor/github.com/adamzy/cedar-go/api.go b/vendor/github.com/adamzy/cedar-go/api.go deleted file mode 100755 index b8470da..0000000 --- a/vendor/github.com/adamzy/cedar-go/api.go +++ /dev/null @@ -1,231 +0,0 @@ -package cedar - -// Status reports the following statistics of the cedar: -// keys: number of keys that are in the cedar, -// nodes: number of trie nodes (slots in the base array) has been taken, -// size: the size of the base array used by the cedar, -// capacity: the capicity of the base array used by the cedar. -func (da *Cedar) Status() (keys, nodes, size, capacity int) { - for i := 0; i < da.Size; i++ { - n := da.Array[i] - if n.Check >= 0 { - nodes++ - if n.Value >= 0 { - keys++ - } - } - } - return keys, nodes, da.Size, da.Capacity -} - -// Jump travels from a node `from` to another node `to` by following the path `path`. -// For example, if the following keys were inserted: -// id key -// 19 abc -// 23 ab -// 37 abcd -// then -// Jump([]byte("ab"), 0) = 23, nil // reach "ab" from root -// Jump([]byte("c"), 23) = 19, nil // reach "abc" from "ab" -// Jump([]byte("cd"), 23) = 37, nil // reach "abcd" from "ab" -func (da *Cedar) Jump(path []byte, from int) (to int, err error) { - for _, b := range path { - if da.Array[from].Value >= 0 { - return from, ErrNoPath - } - to = da.Array[from].base() ^ int(b) - if da.Array[to].Check != from { - return from, ErrNoPath - } - from = to - } - return to, nil -} - -// Key returns the key of the node with the given `id`. -// It will return ErrNoPath, if the node does not exist. -func (da *Cedar) Key(id int) (key []byte, err error) { - for id > 0 { - from := da.Array[id].Check - if from < 0 { - return nil, ErrNoPath - } - if char := byte(da.Array[from].base() ^ id); char != 0 { - key = append(key, char) - } - id = from - } - if id != 0 || len(key) == 0 { - return nil, ErrInvalidKey - } - for i := 0; i < len(key)/2; i++ { - key[i], key[len(key)-i-1] = key[len(key)-i-1], key[i] - } - return key, nil -} - -// Value returns the value of the node with the given `id`. -// It will return ErrNoValue, if the node does not have a value. -func (da *Cedar) Value(id int) (value int, err error) { - value = da.Array[id].Value - if value >= 0 { - return value, nil - } - to := da.Array[id].base() - if da.Array[to].Check == id && da.Array[to].Value >= 0 { - return da.Array[to].Value, nil - } - return 0, ErrNoValue -} - -// Insert adds a key-value pair into the cedar. -// It will return ErrInvalidValue, if value < 0 or >= ValueLimit. 
-func (da *Cedar) Insert(key []byte, value int) error { - if value < 0 || value >= ValueLimit { - return ErrInvalidValue - } - p := da.get(key, 0, 0) - *p = value - return nil -} - -// Update increases the value associated with the `key`. -// The `key` will be inserted if it is not in the cedar. -// It will return ErrInvalidValue, if the updated value < 0 or >= ValueLimit. -func (da *Cedar) Update(key []byte, value int) error { - p := da.get(key, 0, 0) - if *p+value < 0 || *p+value >= ValueLimit { - return ErrInvalidValue - } - *p += value - return nil -} - -// Delete removes a key-value pair from the cedar. -// It will return ErrNoPath, if the key has not been added. -func (da *Cedar) Delete(key []byte) error { - // if the path does not exist, or the end is not a leaf, nothing to delete - to, err := da.Jump(key, 0) - if err != nil { - return ErrNoPath - } - - if da.Array[to].Value < 0 { - base := da.Array[to].base() - if da.Array[base].Check == to { - to = base - } - } - - for { - from := da.Array[to].Check - base := da.Array[from].base() - label := byte(to ^ base) - - // if `to` has sibling, remove `to` from the sibling list, then stop - if da.Ninfos[to].Sibling != 0 || da.Ninfos[from].Child != label { - // delete the label from the child ring first - da.popSibling(from, base, label) - // then release the current node `to` to the empty node ring - da.pushEnode(to) - break - } - // otherwise, just release the current node `to` to the empty node ring - da.pushEnode(to) - // then check its parent node - to = from - } - return nil -} - -// Get returns the value associated with the given `key`. -// It is equivalent to -// id, err1 = Jump(key) -// value, err2 = Value(id) -// Thus, it may return ErrNoPath or ErrNoValue, -func (da *Cedar) Get(key []byte) (value int, err error) { - to, err := da.Jump(key, 0) - if err != nil { - return 0, err - } - return da.Value(to) -} - -// PrefixMatch returns a list of at most `num` nodes which match the prefix of the key. -// If `num` is 0, it returns all matches. -// For example, if the following keys were inserted: -// id key -// 19 abc -// 23 ab -// 37 abcd -// then -// PrefixMatch([]byte("abc"), 1) = [ 23 ] // match ["ab"] -// PrefixMatch([]byte("abcd"), 0) = [ 23, 19, 37] // match ["ab", "abc", "abcd"] -func (da *Cedar) PrefixMatch(key []byte, num int) (ids []int) { - for from, i := 0, 0; i < len(key); i++ { - to, err := da.Jump(key[i:i+1], from) - if err != nil { - break - } - if _, err := da.Value(to); err == nil { - ids = append(ids, to) - num-- - if num == 0 { - return - } - } - from = to - } - return -} - -// PrefixPredict returns a list of at most `num` nodes which has the key as their prefix. -// These nodes are ordered by their keys. -// If `num` is 0, it returns all matches. 
-// For example, if the following keys were inserted: -// id key -// 19 abc -// 23 ab -// 37 abcd -// then -// PrefixPredict([]byte("ab"), 2) = [ 23, 19 ] // predict ["ab", "abc"] -// PrefixPredict([]byte("ab"), 0) = [ 23, 19, 37 ] // predict ["ab", "abc", "abcd"] -func (da *Cedar) PrefixPredict(key []byte, num int) (ids []int) { - root, err := da.Jump(key, 0) - if err != nil { - return - } - for from, err := da.begin(root); err == nil; from, err = da.next(from, root) { - ids = append(ids, from) - num-- - if num == 0 { - return - } - } - return -} - -func (da *Cedar) begin(from int) (to int, err error) { - for c := da.Ninfos[from].Child; c != 0; { - to = da.Array[from].base() ^ int(c) - c = da.Ninfos[to].Child - from = to - } - if da.Array[from].base() > 0 { - return da.Array[from].base(), nil - } - return from, nil -} - -func (da *Cedar) next(from int, root int) (to int, err error) { - c := da.Ninfos[from].Sibling - for c == 0 && from != root && da.Array[from].Check >= 0 { - from = da.Array[from].Check - c = da.Ninfos[from].Sibling - } - if from == root { - return 0, ErrNoPath - } - from = da.Array[da.Array[from].Check].base() ^ int(c) - return da.begin(from) -} diff --git a/vendor/github.com/adamzy/cedar-go/cedar.go b/vendor/github.com/adamzy/cedar-go/cedar.go deleted file mode 100755 index 897f562..0000000 --- a/vendor/github.com/adamzy/cedar-go/cedar.go +++ /dev/null @@ -1,407 +0,0 @@ -package cedar - -const ValueLimit = int(^uint(0) >> 1) - -type node struct { - Value int - Check int -} - -func (n *node) base() int { return -(n.Value + 1) } - -type ninfo struct { - Sibling, Child byte -} - -type block struct { - Prev, Next, Num, Reject, Trial, Ehead int -} - -func (b *block) init() { - b.Num = 256 - b.Reject = 257 -} - -type Cedar struct { - *cedar -} - -type cedar struct { - Array []node - Ninfos []ninfo - Blocks []block - Reject [257]int - BheadF int - BheadC int - BheadO int - Capacity int - Size int - Ordered bool - MaxTrial int -} - -func New() *Cedar { - da := cedar{ - Array: make([]node, 256), - Ninfos: make([]ninfo, 256), - Blocks: make([]block, 1), - Capacity: 256, - Size: 256, - Ordered: true, - MaxTrial: 1, - } - - da.Array[0] = node{-2, 0} - for i := 1; i < 256; i++ { - da.Array[i] = node{-(i - 1), -(i + 1)} - } - da.Array[1].Value = -255 - da.Array[255].Check = -1 - - da.Blocks[0].Ehead = 1 - da.Blocks[0].init() - - for i := 0; i <= 256; i++ { - da.Reject[i] = i + 1 - } - - return &Cedar{&da} -} - -// Get value by key, insert the key if not exist -func (da *cedar) get(key []byte, from, pos int) *int { - for ; pos < len(key); pos++ { - if value := da.Array[from].Value; value >= 0 && value != ValueLimit { - to := da.follow(from, 0) - da.Array[to].Value = value - } - from = da.follow(from, key[pos]) - } - to := from - if da.Array[from].Value < 0 { - to = da.follow(from, 0) - } - return &da.Array[to].Value -} - -func (da *cedar) follow(from int, label byte) int { - base := da.Array[from].base() - to := base ^ int(label) - if base < 0 || da.Array[to].Check < 0 { - hasChild := false - if base >= 0 { - hasChild = (da.Array[base^int(da.Ninfos[from].Child)].Check == from) - } - to = da.popEnode(base, label, from) - da.pushSibling(from, to^int(label), label, hasChild) - } else if da.Array[to].Check != from { - to = da.resolve(from, base, label) - } else if da.Array[to].Check == from { - } else { - panic("cedar: internal error, should not be here") - } - return to -} - -func (da *cedar) popBlock(bi int, head_in *int, last bool) { - if last { - *head_in = 0 - } else { - b := 
&da.Blocks[bi] - da.Blocks[b.Prev].Next = b.Next - da.Blocks[b.Next].Prev = b.Prev - if bi == *head_in { - *head_in = b.Next - } - } -} - -func (da *cedar) pushBlock(bi int, head_out *int, empty bool) { - b := &da.Blocks[bi] - if empty { - *head_out, b.Prev, b.Next = bi, bi, bi - } else { - tail_out := &da.Blocks[*head_out].Prev - b.Prev = *tail_out - b.Next = *head_out - *head_out, *tail_out, da.Blocks[*tail_out].Next = bi, bi, bi - } -} - -func (da *cedar) addBlock() int { - if da.Size == da.Capacity { - da.Capacity *= 2 - - oldArray := da.Array - da.Array = make([]node, da.Capacity) - copy(da.Array, oldArray) - - oldNinfo := da.Ninfos - da.Ninfos = make([]ninfo, da.Capacity) - copy(da.Ninfos, oldNinfo) - - oldBlock := da.Blocks - da.Blocks = make([]block, da.Capacity>>8) - copy(da.Blocks, oldBlock) - } - - da.Blocks[da.Size>>8].init() - da.Blocks[da.Size>>8].Ehead = da.Size - - da.Array[da.Size] = node{-(da.Size + 255), -(da.Size + 1)} - for i := da.Size + 1; i < da.Size+255; i++ { - da.Array[i] = node{-(i - 1), -(i + 1)} - } - da.Array[da.Size+255] = node{-(da.Size + 254), -da.Size} - - da.pushBlock(da.Size>>8, &da.BheadO, da.BheadO == 0) - da.Size += 256 - return da.Size>>8 - 1 -} - -func (da *cedar) transferBlock(bi int, head_in, head_out *int) { - da.popBlock(bi, head_in, bi == da.Blocks[bi].Next) - da.pushBlock(bi, head_out, *head_out == 0 && da.Blocks[bi].Num != 0) -} - -func (da *cedar) popEnode(base int, label byte, from int) int { - e := base ^ int(label) - if base < 0 { - e = da.findPlace() - } - bi := e >> 8 - n := &da.Array[e] - b := &da.Blocks[bi] - b.Num-- - if b.Num == 0 { - if bi != 0 { - da.transferBlock(bi, &da.BheadC, &da.BheadF) - } - } else { - da.Array[-n.Value].Check = n.Check - da.Array[-n.Check].Value = n.Value - if e == b.Ehead { - b.Ehead = -n.Check - } - if bi != 0 && b.Num == 1 && b.Trial != da.MaxTrial { - da.transferBlock(bi, &da.BheadO, &da.BheadC) - } - } - n.Value = ValueLimit - n.Check = from - if base < 0 { - da.Array[from].Value = -(e ^ int(label)) - 1 - } - return e -} - -func (da *cedar) pushEnode(e int) { - bi := e >> 8 - b := &da.Blocks[bi] - b.Num++ - if b.Num == 1 { - b.Ehead = e - da.Array[e] = node{-e, -e} - if bi != 0 { - da.transferBlock(bi, &da.BheadF, &da.BheadC) - } - } else { - prev := b.Ehead - next := -da.Array[prev].Check - da.Array[e] = node{-prev, -next} - da.Array[prev].Check = -e - da.Array[next].Value = -e - if b.Num == 2 || b.Trial == da.MaxTrial { - if bi != 0 { - da.transferBlock(bi, &da.BheadC, &da.BheadO) - } - } - b.Trial = 0 - } - if b.Reject < da.Reject[b.Num] { - b.Reject = da.Reject[b.Num] - } - da.Ninfos[e] = ninfo{} -} - -// hasChild: wherether the `from` node has children -func (da *cedar) pushSibling(from, base int, label byte, hasChild bool) { - c := &da.Ninfos[from].Child - keepOrder := *c == 0 - if da.Ordered { - keepOrder = label > *c - } - if hasChild && keepOrder { - c = &da.Ninfos[base^int(*c)].Sibling - for da.Ordered && *c != 0 && *c < label { - c = &da.Ninfos[base^int(*c)].Sibling - } - } - da.Ninfos[base^int(label)].Sibling = *c - *c = label -} - -func (da *cedar) popSibling(from, base int, label byte) { - c := &da.Ninfos[from].Child - for *c != label { - c = &da.Ninfos[base^int(*c)].Sibling - } - *c = da.Ninfos[base^int(*c)].Sibling -} - -func (da *cedar) consult(base_n, base_p int, c_n, c_p byte) bool { - c_n = da.Ninfos[base_n^int(c_n)].Sibling - c_p = da.Ninfos[base_p^int(c_p)].Sibling - for c_n != 0 && c_p != 0 { - c_n = da.Ninfos[base_n^int(c_n)].Sibling - c_p = da.Ninfos[base_p^int(c_p)].Sibling - 
} - return c_p != 0 -} - -func (da *cedar) setChild(base int, c byte, label byte, flag bool) []byte { - child := make([]byte, 0, 257) - if c == 0 { - child = append(child, c) - c = da.Ninfos[base^int(c)].Sibling - } - if da.Ordered { - for c != 0 && c <= label { - child = append(child, c) - c = da.Ninfos[base^int(c)].Sibling - } - } - if flag { - child = append(child, label) - } - for c != 0 { - child = append(child, c) - c = da.Ninfos[base^int(c)].Sibling - } - return child -} - -func (da *cedar) findPlace() int { - if da.BheadC != 0 { - return da.Blocks[da.BheadC].Ehead - } - if da.BheadO != 0 { - return da.Blocks[da.BheadO].Ehead - } - return da.addBlock() << 8 -} - -func (da *cedar) findPlaces(child []byte) int { - bi := da.BheadO - if bi != 0 { - bz := da.Blocks[da.BheadO].Prev - nc := len(child) - for { - b := &da.Blocks[bi] - if b.Num >= nc && nc < b.Reject { - for e := b.Ehead; ; { - base := e ^ int(child[0]) - for i := 0; da.Array[base^int(child[i])].Check < 0; i++ { - if i == len(child)-1 { - b.Ehead = e - return e - } - } - e = -da.Array[e].Check - if e == b.Ehead { - break - } - } - } - b.Reject = nc - if b.Reject < da.Reject[b.Num] { - da.Reject[b.Num] = b.Reject - } - bi_ := b.Next - b.Trial++ - if b.Trial == da.MaxTrial { - da.transferBlock(bi, &da.BheadO, &da.BheadC) - } - if bi == bz { - break - } - bi = bi_ - } - } - return da.addBlock() << 8 -} - -func (da *cedar) resolve(from_n, base_n int, label_n byte) int { - to_pn := base_n ^ int(label_n) - from_p := da.Array[to_pn].Check - base_p := da.Array[from_p].base() - - flag := da.consult(base_n, base_p, da.Ninfos[from_n].Child, da.Ninfos[from_p].Child) - var children []byte - if flag { - children = da.setChild(base_n, da.Ninfos[from_n].Child, label_n, true) - } else { - children = da.setChild(base_p, da.Ninfos[from_p].Child, 255, false) - } - var base int - if len(children) == 1 { - base = da.findPlace() - } else { - base = da.findPlaces(children) - } - base ^= int(children[0]) - var from int - var base_ int - if flag { - from = from_n - base_ = base_n - } else { - from = from_p - base_ = base_p - } - if flag && children[0] == label_n { - da.Ninfos[from].Child = label_n - } - da.Array[from].Value = -base - 1 - for i := 0; i < len(children); i++ { - to := da.popEnode(base, children[i], from) - to_ := base_ ^ int(children[i]) - if i == len(children)-1 { - da.Ninfos[to].Sibling = 0 - } else { - da.Ninfos[to].Sibling = children[i+1] - } - if flag && to_ == to_pn { // new node has no child - continue - } - n := &da.Array[to] - n_ := &da.Array[to_] - n.Value = n_.Value - if n.Value < 0 && children[i] != 0 { - // this node has children, fix their check - c := da.Ninfos[to_].Child - da.Ninfos[to].Child = c - da.Array[n.base()^int(c)].Check = to - c = da.Ninfos[n.base()^int(c)].Sibling - for c != 0 { - da.Array[n.base()^int(c)].Check = to - c = da.Ninfos[n.base()^int(c)].Sibling - } - } - if !flag && to_ == from_n { // parent node moved - from_n = to - } - if !flag && to_ == to_pn { - da.pushSibling(from_n, to_pn^int(label_n), label_n, true) - da.Ninfos[to_].Child = 0 - n_.Value = ValueLimit - n_.Check = from_n - } else { - da.pushEnode(to_) - } - } - if flag { - return base ^ int(label_n) - } - return to_pn -} diff --git a/vendor/github.com/adamzy/cedar-go/doc.go b/vendor/github.com/adamzy/cedar-go/doc.go deleted file mode 100755 index 8382ddc..0000000 --- a/vendor/github.com/adamzy/cedar-go/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package cedar-go implements double-array trie. 
-// -// It is a golang port of cedar (http://www.tkl.iis.u-tokyo.ac.jp/~ynaga/cedar) which is written in C++ by Naoki Yoshinaga. -// Currently cedar-go implements the `reduced` verion of cedar. -// This package is not thread safe if there is one goroutine doing -// insertions or deletions. -// -// Note -// -// key must be `[]byte` without zero items, -// while value must be integer in the range [0, 2<<63-2] or [0, 2<<31-2] depends on the platform. -package cedar diff --git a/vendor/github.com/adamzy/cedar-go/errors.go b/vendor/github.com/adamzy/cedar-go/errors.go deleted file mode 100755 index 8b65285..0000000 --- a/vendor/github.com/adamzy/cedar-go/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package cedar - -import "errors" - -var ( - ErrInvalidDataType = errors.New("cedar: invalid datatype") - ErrInvalidValue = errors.New("cedar: invalid value") - ErrInvalidKey = errors.New("cedar: invalid key") - ErrNoPath = errors.New("cedar: no path") - ErrNoValue = errors.New("cedar: no value") -) diff --git a/vendor/github.com/adamzy/cedar-go/io.go b/vendor/github.com/adamzy/cedar-go/io.go deleted file mode 100755 index b11f595..0000000 --- a/vendor/github.com/adamzy/cedar-go/io.go +++ /dev/null @@ -1,63 +0,0 @@ -package cedar - -import ( - "bufio" - "encoding/gob" - "encoding/json" - "io" - "os" -) - -// Save saves the cedar to an io.Writer, -// where dataType is either "json" or "gob". -func (da *Cedar) Save(out io.Writer, dataType string) error { - switch dataType { - case "gob", "GOB": - dataEecoder := gob.NewEncoder(out) - return dataEecoder.Encode(da.cedar) - case "json", "JSON": - dataEecoder := json.NewEncoder(out) - return dataEecoder.Encode(da.cedar) - } - return ErrInvalidDataType -} - -// SaveToFile saves the cedar to a file, -// where dataType is either "json" or "gob". -func (da *Cedar) SaveToFile(fileName string, dataType string) error { - file, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return err - } - defer file.Close() - out := bufio.NewWriter(file) - defer out.Flush() - da.Save(out, dataType) - return nil -} - -// Load loads the cedar from an io.Writer, -// where dataType is either "json" or "gob". -func (da *Cedar) Load(in io.Reader, dataType string) error { - switch dataType { - case "gob", "GOB": - dataDecoder := gob.NewDecoder(in) - return dataDecoder.Decode(da.cedar) - case "json", "JSON": - dataDecoder := json.NewDecoder(in) - return dataDecoder.Decode(da.cedar) - } - return ErrInvalidDataType -} - -// LoadFromFile loads the cedar from a file, -// where dataType is either "json" or "gob". 
-func (da *Cedar) LoadFromFile(fileName string, dataType string) error { - file, err := os.OpenFile(fileName, os.O_RDONLY, 0600) - defer file.Close() - if err != nil { - return err - } - in := bufio.NewReader(file) - return da.Load(in, dataType) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/CHANGELOG.md b/vendor/github.com/aliyun/aliyun-oss-go-sdk/CHANGELOG.md deleted file mode 100644 index 7eb0df1..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/CHANGELOG.md +++ /dev/null @@ -1,89 +0,0 @@ -# ChangeLog - Aliyun OSS SDK for Go -## 版本号:1.9.1 日期:2018-09-17 -### 变更内容 - - 变更:支持ipv6 - - 变更:支持修改对象的存储类型 - - 修复:修改sample中GetBucketReferer方法名拼写错误 - - 修复:修复NopCloser在close的时候并不释放内存的内存泄漏问题 - - 变更:增加ProcessObject接口 - - 修复:修改图片处理接口参数拼写错误导致无法处理的bug - - 修复:增加ListUploadedParts接口的options选项 - - 修复:增加Callback&CallbackVal选项,支持回调使用 - - 修复:GetObject接口返回Response,支持用户读取crc等返回值 - - 修复:当以压缩格式返回数据时,GetObject接口不校验crc - -## 版本号:1.9.0 日期:2018-06-15 -### 变更内容 - - 变更:国际化 - -## 版本号:1.8.0 日期:2017-12-12 -### 变更内容 - - 变更:空闲链接关闭时间调整为50秒 - - 修复:修复临时账号使用SignURL的问题 - -## 版本号:1.7.0 日期:2017-09-25 -### 变更内容 - - 增加:DownloadFile支持CRC校验 - - 增加:STS测试用例 - -## 版本号:1.6.0 日期:2017-09-01 -### 变更内容 - - 修复:URL中特殊字符的编码问题 - - 变更:不再支持Golang 1.4 - -## 版本号:1.5.1 日期:2017-08-04 -### 变更内容 - - 修复:SignURL中Key编码的问题 - - 修复:DownloadFile下载完成后rename失败的问题 - -## 版本号:1.5.0 日期:2017-07-25 -### 变更内容 - - 增加:支持生成URL签名 - - 增加:GetObject支持ResponseContentType等选项 - - 修复:DownloadFile去除分片小于5GB的限制 - - 修复:AppendObject在appendPosition不正确时发生panic - -## 版本号:1.4.0 日期:2017-05-23 -### 变更内容 - - 增加:支持符号链接symlink - - 增加:支持RestoreObject - - 增加:CreateBucket支持StorageClass - - 增加:支持范围读NormalizedRange - - 修复:IsObjectExist使用GetObjectMeta实现 - -## 版本号:1.3.0 日期:2017-01-13 -### 变更内容 - - 增加:上传下载支持进度条功能 - -## 版本号:1.2.3 日期:2016-12-28 -### 变更内容 - - 修复:每次请求使用一个http.Client修改为共用http.Client - -## 版本号:1.2.2 日期:2016-12-10 -### 变更内容 - - 修复:GetObjectToFile/DownloadFile使用临时文件下载,成功后重命名成下载文件 - - 修复:新建的下载文件权限修改为0664 - -## 版本号:1.2.1 日期:2016-11-11 -### 变更内容 - - 修复:只有当OSS返回x-oss-hash-crc64ecma头部时,才对上传的文件进行CRC64完整性校验 - -## 版本号:1.2.0 日期:2016-10-18 -### 变更内容 - - 增加:支持CRC64校验 - - 增加:支持指定Useragent - - 修复:计算MD5占用内存大的问题 - - 修复:CopyObject时Object名称没有URL编码的问题 - -## 版本号:1.1.0 日期:2016-08-09 -### 变更内容 - - 增加:支持代理服务器 - -## 版本号:1.0.0 日期:2016-06-24 -### 变更内容 - - 增加:断点分片复制接口Bucket.CopyFile - - 增加:Bucket间复制接口Bucket.CopyObjectTo、Bucket.CopyObjectFrom - - 增加:Client.GetBucketInfo接口 - - 增加:Bucket.UploadPartCopy支持Bucket间复制 - - 修复:断点上传、断点下载出错后,协程不退出的Bug - - 删除:接口Bucket.CopyObjectToBucket diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/README-CN.md b/vendor/github.com/aliyun/aliyun-oss-go-sdk/README-CN.md deleted file mode 100644 index 31ded02..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/README-CN.md +++ /dev/null @@ -1,167 +0,0 @@ -# Aliyun OSS SDK for Go - -[![GitHub version](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk.svg)](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk) -[![Build Status](https://travis-ci.org/aliyun/aliyun-oss-go-sdk.svg?branch=master)](https://travis-ci.org/aliyun/aliyun-oss-go-sdk) -[![Coverage Status](https://coveralls.io/repos/github/aliyun/aliyun-oss-go-sdk/badge.svg?branch=master)](https://coveralls.io/github/aliyun/aliyun-oss-go-sdk?branch=master) - -## [README of English](https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/README.md) - -## 关于 -> - 此Go SDK基于[阿里云对象存储服务](http://www.aliyun.com/product/oss/)官方API构建。 -> - 阿里云对象存储(Object Storage Service,简称OSS),是阿里云对外提供的海量,安全,低成本,高可靠的云存储服务。 -> - OSS适合存放任意文件类型,适合各种网站、开发企业及开发者使用。 -> - 
使用此SDK,用户可以方便地在任何应用、任何时间、任何地点上传,下载和管理数据。 - -## 版本 -> - 当前版本:1.9.1 - -## 运行环境 -> - Go 1.5及以上。 - -## 安装方法 -### GitHub安装 -> - 执行命令`go get github.com/aliyun/aliyun-oss-go-sdk/oss`获取远程代码包。 -> - 在您的代码中使用`import "github.com/aliyun/aliyun-oss-go-sdk/oss"`引入OSS Go SDK的包。 - -## 快速使用 -#### 获取存储空间列表(List Bucket) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - lsRes, err := client.ListBuckets() - if err != nil { - // HandleError(err) - } - - for _, bucket := range lsRes.Buckets { - fmt.Println("Buckets:", bucket.Name) - } -``` - -#### 创建存储空间(Create Bucket) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - err = client.CreateBucket("my-bucket") - if err != nil { - // HandleError(err) - } -``` - -#### 删除存储空间(Delete Bucket) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - err = client.DeleteBucket("my-bucket") - if err != nil { - // HandleError(err) - } -``` - -#### 上传文件(Put Object) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - err = bucket.PutObjectFromFile("my-object", "LocalFile") - if err != nil { - // HandleError(err) - } -``` - -#### 下载文件 (Get Object) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - err = bucket.GetObjectToFile("my-object", "LocalFile") - if err != nil { - // HandleError(err) - } -``` - -#### 获取文件列表(List Objects) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - lsRes, err := bucket.ListObjects() - if err != nil { - // HandleError(err) - } - - for _, object := range lsRes.Objects { - fmt.Println("Objects:", object.Key) - } -``` - -#### 删除文件(Delete Object) -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - err = bucket.DeleteObject("my-object") - if err != nil { - // HandleError(err) - } -``` - -#### 其它 -更多的示例程序,请参看OSS Go SDK安装路径(即GOPATH变量中的第一个路径)下的`src\github.com\aliyun\aliyun-oss-go-sdk\sample`,该目录下为示例程序, -或者参看`https://github.com/aliyun/aliyun-oss-go-sdk`下sample目录中的示例文件。 - -## 注意事项 -### 运行sample -> - 拷贝示例文件。到OSS Go SDK的安装路径(即GOPATH变量中的第一个路径),进入OSS Go SDK的代码目录`src\github.com\aliyun\aliyun-oss-go-sdk`, -把其下的sample目录和sample.go复制到您的测试工程src目录下。 -> - 修改sample/config.go里的endpoint、AccessKeyId、AccessKeySecret、BucketName等配置。 -> - 请在您的工程目录下执行`go run src/sample.go`。 - -## 联系我们 -> - [阿里云OSS官方网站](http://oss.aliyun.com) -> - [阿里云OSS官方论坛](http://bbs.aliyun.com) -> - [阿里云OSS官方文档中心](http://www.aliyun.com/product/oss#Docs) -> - 阿里云官方技术支持:[提交工单](https://workorder.console.aliyun.com/#/ticket/createIndex) - -## 作者 -> - Yubin Bai - -## License -> - Apache License 2.0 diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/README.md b/vendor/github.com/aliyun/aliyun-oss-go-sdk/README.md deleted file mode 100644 index 37b69a0..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/README.md +++ /dev/null @@ -1,166 +0,0 @@ 
-# Alibaba Cloud OSS SDK for Go - -[![GitHub Version](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk.svg)](https://badge.fury.io/gh/aliyun%2Faliyun-oss-go-sdk) -[![Build Status](https://travis-ci.org/aliyun/aliyun-oss-go-sdk.svg?branch=master)](https://travis-ci.org/aliyun/aliyun-oss-go-sdk) -[![Coverage Status](https://coveralls.io/repos/github/aliyun/aliyun-oss-go-sdk/badge.svg?branch=master)](https://coveralls.io/github/aliyun/aliyun-oss-go-sdk?branch=master) - -## [README of Chinese](https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/README-CN.md) - -## About -> - This Go SDK is based on the official APIs of [Alibaba Cloud OSS](http://www.aliyun.com/product/oss/). -> - Alibaba Cloud Object Storage Service (OSS) is a cloud storage service provided by Alibaba Cloud, featuring massive capacity, security, a low cost, and high reliability. -> - The OSS can store any type of files and therefore applies to various websites, development enterprises and developers. -> - With this SDK, you can upload, download and manage data on any app anytime and anywhere conveniently. - -## Version -> - Current version: 1.9.1. - -## Running Environment -> - Go 1.5 or above. - -## Installing -### Install the SDK through GitHub -> - Run the 'go get github.com/aliyun/aliyun-oss-go-sdk/oss' command to get the remote code package. -> - Use 'import "github.com/aliyun/aliyun-oss-go-sdk/oss"' in your code to introduce OSS Go SDK package. - -## Getting Started -### List Bucket -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - lsRes, err := client.ListBuckets() - if err != nil { - // HandleError(err) - } - - for _, bucket := range lsRes.Buckets { - fmt.Println("Buckets:", bucket.Name) - } -``` - -### Create Bucket -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - err = client.CreateBucket("my-bucket") - if err != nil { - // HandleError(err) - } -``` - -### Delete Bucket -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - err = client.DeleteBucket("my-bucket") - if err != nil { - // HandleError(err) - } -``` - -### Put Object -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - err = bucket.PutObjectFromFile("my-object", "LocalFile") - if err != nil { - // HandleError(err) - } -``` - -### Get Object -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - err = bucket.GetObjectToFile("my-object", "LocalFile") - if err != nil { - // HandleError(err) - } -``` - -### List Objects -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // HandleError(err) - } - - lsRes, err := bucket.ListObjects() - if err != nil { - // HandleError(err) - } - - for _, object := range lsRes.Objects { - fmt.Println("Objects:", object.Key) - } -``` - -### Delete Object -```go - client, err := oss.New("Endpoint", "AccessKeyId", "AccessKeySecret") - if err != nil { - // HandleError(err) - } - - bucket, err := client.Bucket("my-bucket") - if err != nil { - // 
HandleError(err) - } - - err = bucket.DeleteObject("my-object") - if err != nil { - // HandleError(err) - } -``` - -## Complete Example -More example projects can be found at 'src\github.com\aliyun\aliyun-oss-go-sdk\sample' under the installation path of the OSS Go SDK (the first path of the GOPATH variable). The directory contains example projects. -Or you can refer to the example objects in the sample directory under 'https://github.com/aliyun/aliyun-oss-go-sdk'. - -### Running Example -> - Copy the example file. Go to the installation path of OSS Go SDK (the first path of the GOPATH variable), enter the code directory of the OSS Go SDK, namely 'src\github.com\aliyun\aliyun-oss-go-sdk', -and copy the sample directory and sample.go to the src directory of your test project. -> - Modify the endpoint, AccessKeyId, AccessKeySecret and BucketName configuration settings in sample/config.go. -> - Run 'go run src/sample.go' under your project directory. - -## Contacting us -> - [Alibaba Cloud OSS official website](http://oss.aliyun.com). -> - [Alibaba Cloud OSS official forum](http://bbs.aliyun.com). -> - [Alibaba Cloud OSS official documentation center](http://www.aliyun.com/product/oss#Docs). -> - Alibaba Cloud official technical support: [Submit a ticket](https://workorder.console.aliyun.com/#/ticket/createIndex). - -## Author -> - Yubin Bai. - -## License -> - Apache License 2.0. diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go deleted file mode 100755 index c428656..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go +++ /dev/null @@ -1,92 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "hash" - "io" - "net/http" - "sort" - "strings" -) - -// 用于signHeader的字典排序存放容器。 -type headerSorter struct { - Keys []string - Vals []string -} - -// 生成签名方法(直接设置请求的Header)。 -func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { - // Find out the "x-oss-"'s address in this request'header - temp := make(map[string]string) - - for k, v := range req.Header { - if strings.HasPrefix(strings.ToLower(k), "x-oss-") { - temp[strings.ToLower(k)] = v[0] - } - } - hs := newHeaderSorter(temp) - - // Sort the temp by the Ascending Order - hs.Sort() - - // Get the CanonicalizedOSSHeaders - canonicalizedOSSHeaders := "" - for i := range hs.Keys { - canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" - } - - // Give other parameters values - date := req.Header.Get(HTTPHeaderDate) - contentType := req.Header.Get(HTTPHeaderContentType) - contentMd5 := req.Header.Get(HTTPHeaderContentMD5) - - signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource - h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret)) - io.WriteString(h, signStr) - signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) - - // Get the final Authorization' string - authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + signedStr - - // Give the parameter "Authorization" value - req.Header.Set(HTTPHeaderAuthorization, authorizationStr) -} - -// Additional function for function SignHeader. 
-func newHeaderSorter(m map[string]string) *headerSorter { - hs := &headerSorter{ - Keys: make([]string, 0, len(m)), - Vals: make([]string, 0, len(m)), - } - - for k, v := range m { - hs.Keys = append(hs.Keys, k) - hs.Vals = append(hs.Vals, v) - } - return hs -} - -// Additional function for function SignHeader. -func (hs *headerSorter) Sort() { - sort.Sort(hs) -} - -// Additional function for function SignHeader. -func (hs *headerSorter) Len() int { - return len(hs.Vals) -} - -// Additional function for function SignHeader. -func (hs *headerSorter) Less(i, j int) bool { - return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 -} - -// Additional function for function SignHeader. -func (hs *headerSorter) Swap(i, j int) { - hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] - hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go deleted file mode 100755 index fd4fad7..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go +++ /dev/null @@ -1,618 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/xml" - "hash/crc64" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strconv" -) - -// Bucket implements the operations of object. -type Bucket struct { - Client Client - BucketName string -} - -// -// PutObject 新建Object,如果Object已存在,覆盖原有Object。 -// -// objectKey 上传对象的名称,使用UTF-8编码、长度必须在1-1023字节之间、不能以“/”或者“\”字符开头。 -// reader io.Reader读取object的数据。 -// options 上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、 -// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看 -// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { - opts := addContentType(options, objectKey) - - request := &PutObjectRequest{ - ObjectKey: objectKey, - Reader: reader, - } - resp, err := bucket.DoPutObject(request, opts) - if err != nil { - return err - } - defer resp.Body.Close() - - return err -} - -// -// PutObjectFromFile 新建Object,内容从本地文件中读取。 -// -// objectKey 上传对象的名称。 -// filePath 本地文件,上传对象的值为该文件内容。 -// options 上传对象时可以指定对象的属性。详见PutObject的options。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { - fd, err := os.Open(filePath) - if err != nil { - return err - } - defer fd.Close() - - opts := addContentType(options, filePath, objectKey) - - request := &PutObjectRequest{ - ObjectKey: objectKey, - Reader: fd, - } - resp, err := bucket.DoPutObject(request, opts) - if err != nil { - return err - } - defer resp.Body.Close() - - return err -} - -// -// DoPutObject 上传文件。 -// -// request 上传请求。 -// options 上传选项。 -// -// Response 上传请求返回值。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { - isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType) - if !isOptSet { - options = addContentType(options, request.ObjectKey) - } - - resp, err := bucket.do("PUT", request.ObjectKey, "", "", options, request.Reader) - if err != nil { - return nil, err - } - - if bucket.getConfig().IsEnableCRC { - err = checkCRC(resp, "DoPutObject") - if err != nil { - return resp, err - } - } - - err = checkRespCode(resp.StatusCode, []int{http.StatusOK}) - - return resp, err -} - -// -// GetObject 下载文件。 -// -// objectKey 下载的文件名称。 
-// options 对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、 -// IfNoneMatch、AcceptEncoding,详细请参考 -// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html -// -// io.ReadCloser reader,读取数据后需要close。error为nil时有效。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { - result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) - if err != nil { - return nil, err - } - return result.Response.Body, nil -} - -// -// GetObjectToFile 下载文件。 -// -// objectKey 下载的文件名称。 -// filePath 下载对象的内容写到该本地文件。 -// options 对象的属性限制项。详见GetObject的options。 -// -// error 操作无错误时返回error为nil,非nil为错误说明。 -// -func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { - // 读取Object内容 - result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) - if err != nil { - return err - } - defer result.Response.Body.Close() - - // 如果文件不存在则创建,存在则清空 - fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660) - if err != nil { - return err - } - defer fd.Close() - - // 存储数据到文件 - _, err = io.Copy(fd, result.Response.Body) - if err != nil { - return err - } - - // 比较CRC值 - hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) - if bucket.getConfig().IsEnableCRC && !hasRange { - result.Response.ClientCRC = result.ClientCRC.Sum64() - err = checkCRC(result.Response, "GetObjectToFile") - if err != nil { - return err - } - } - - return nil -} - -// -// DoGetObject 下载文件 -// -// request 下载请求 -// options 对象的属性限制项。详见GetObject的options。 -// -// GetObjectResult 下载请求返回值。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { - resp, err := bucket.do("GET", request.ObjectKey, "", "", options, nil) - if err != nil { - return nil, err - } - - result := &GetObjectResult{ - Response: resp, - } - - hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) - if bucket.getConfig().IsEnableCRC && !hasRange { - crcCalc := crc64.New(crcTable()) - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, crcCalc)) - result.ServerCRC = resp.ServerCRC - result.ClientCRC = crcCalc - } - - return result, nil -} - -// -// CopyObject 同一个bucket内拷贝Object。 -// -// srcObjectKey Copy的源对象。 -// destObjectKey Copy的目标对象。 -// options Copy对象时,您可以指定源对象的限制条件,满足限制条件时copy,不满足时返回错误,您可以选择如下选项CopySourceIfMatch、 -// CopySourceIfNoneMatch、CopySourceIfModifiedSince、CopySourceIfUnmodifiedSince、MetadataDirective。 -// Copy对象时,您可以指定目标对象的属性,如CacheControl、ContentDisposition、ContentEncoding、Expires、 -// ServerSideEncryption、ObjectACL、Meta,选项的含义请参看 -// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { - var out CopyObjectResult - options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) - resp, err := bucket.do("PUT", destObjectKey, "", "", options, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// CopyObjectTo bucket间拷贝object。 -// -// srcObjectKey 源Object名称。源Bucket名称为Bucket.BucketName。 -// destBucketName 目标Bucket名称。 -// destObjectKey 目标Object名称。 -// options Copy选项,详见CopyObject的options。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options 
...Option) (CopyObjectResult, error) { - return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) -} - -// -// CopyObjectFrom bucket间拷贝object。 -// -// srcBucketName 源Bucket名称。 -// srcObjectKey 源Object名称。 -// destObjectKey 目标Object名称。目标Bucket名称为Bucket.BucketName。 -// options Copy选项,详见CopyObject的options。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { - destBucketName := bucket.BucketName - var out CopyObjectResult - srcBucket, err := bucket.Client.Bucket(srcBucketName) - if err != nil { - return out, err - } - - return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) -} - -func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { - var out CopyObjectResult - options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) - headers := make(map[string]string) - err := handleOptions(headers, options) - if err != nil { - return out, err - } - resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, "", "", headers, nil, 0) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// AppendObject 追加方式上传。 -// -// AppendObject参数必须包含position,其值指定从何处进行追加。首次追加操作的position必须为0, -// 后续追加操作的position是Object的当前长度。例如,第一次Append Object请求指定position值为0, -// content-length是65536;那么,第二次Append Object需要指定position为65536。 -// 每次操作成功后,响应头部x-oss-next-append-position也会标明下一次追加的position。 -// -// objectKey 需要追加的Object。 -// reader io.Reader,读取追的内容。 -// appendPosition object追加的起始位置。 -// destObjectProperties 第一次追加时指定新对象的属性,如CacheControl、ContentDisposition、ContentEncoding、 -// Expires、ServerSideEncryption、ObjectACL。 -// -// int64 下次追加的开始位置,error为nil空时有效。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) { - request := &AppendObjectRequest{ - ObjectKey: objectKey, - Reader: reader, - Position: appendPosition, - } - - result, err := bucket.DoAppendObject(request, options) - - return result.NextPosition, err -} - -// -// DoAppendObject 追加上传。 -// -// request 追加上传请求。 -// options 追加上传选项。 -// -// AppendObjectResult 追加上传请求返回值。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) { - params := "append&position=" + strconv.FormatInt(request.Position, 10) - headers := make(map[string]string) - - opts := addContentType(options, request.ObjectKey) - handleOptions(headers, opts) - - var initCRC uint64 - isCRCSet, initCRCStr, _ := isOptionSet(options, initCRC64) - if isCRCSet { - initCRC, _ = strconv.ParseUint(initCRCStr, 10, 64) - } - - handleOptions(headers, opts) - resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, params, headers, request.Reader, initCRC) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64) - result := &AppendObjectResult{ - NextPosition: nextPosition, - CRC: resp.ServerCRC, - } - - if bucket.getConfig().IsEnableCRC && isCRCSet { - err = checkCRC(resp, "AppendObject") - if err != nil { - return result, err - } - } - - return result, nil -} - -// -// DeleteObject 删除Object。 -// -// objectKey 待删除Object。 -// -// error 
操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DeleteObject(objectKey string) error { - resp, err := bucket.do("DELETE", objectKey, "", "", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// DeleteObjects 批量删除object。 -// -// objectKeys 待删除object类表。 -// options 删除选项,DeleteObjectsQuiet,是否是安静模式,默认不使用。 -// -// DeleteObjectsResult 非安静模式的的返回值。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) { - out := DeleteObjectsResult{} - dxml := deleteXML{} - for _, key := range objectKeys { - dxml.Objects = append(dxml.Objects, DeleteObject{Key: key}) - } - isQuietStr, _ := findOption(options, deleteObjectsQuiet, "FALSE") - isQuiet, _ := strconv.ParseBool(isQuietStr) - dxml.Quiet = isQuiet - encode := "&encoding-type=url" - - bs, err := xml.Marshal(dxml) - if err != nil { - return out, err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - options = append(options, ContentType(contentType)) - sum := md5.Sum(bs) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - options = append(options, ContentMD5(b64)) - resp, err := bucket.do("POST", "", "delete"+encode, "delete", options, buffer) - if err != nil { - return out, err - } - defer resp.Body.Close() - - if !dxml.Quiet { - if err = xmlUnmarshal(resp.Body, &out); err == nil { - err = decodeDeleteObjectsResult(&out) - } - } - return out, err -} - -// -// IsObjectExist object是否存在。 -// -// bool object是否存在,true存在,false不存在。error为nil时有效。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) { - listRes, err := bucket.ListObjects(Prefix(objectKey), MaxKeys(1)) - if err != nil { - return false, err - } - - if len(listRes.Objects) == 1 && listRes.Objects[0].Key == objectKey { - return true, nil - } - return false, nil -} - -// -// ListObjects 获得Bucket下筛选后所有的object的列表。 -// -// options ListObject的筛选行为。Prefix指定的前缀、MaxKeys最大数目、Marker第一个开始、Delimiter对Object名字进行分组的字符。 -// -// 您有如下8个object,my-object-1, my-object-11, my-object-2, my-object-21, -// my-object-22, my-object-3, my-object-31, my-object-32。如果您指定了Prefix为my-object-2, -// 则返回my-object-2, my-object-21, my-object-22三个object。如果您指定了Marker为my-object-22, -// 则返回my-object-3, my-object-31, my-object-32三个object。如果您指定MaxKeys则每次最多返回MaxKeys个, -// 最后一次可能不足。这三个参数可以组合使用,实现分页等功能。如果把prefix设为某个文件夹名,就可以罗列以此prefix开头的文件, -// 即该文件夹下递归的所有的文件和子文件夹。如果再把delimiter设置为"/"时,返回值就只罗列该文件夹下的文件,该文件夹下的子文件名 -// 返回在CommonPrefixes部分,子文件夹下递归的文件和文件夹不被显示。例如一个bucket存在三个object,fun/test.jpg、 -// fun/movie/001.avi、fun/movie/007.avi。若设定prefix为"fun/",则返回三个object;如果增加设定 -// delimiter为"/",则返回文件"fun/test.jpg"和前缀"fun/movie/",即实现了文件夹的逻辑。 -// -// 常用场景,请参数示例sample/list_object.go。 -// -// ListObjectsResponse 操作成功后的返回值,成员Objects为bucket中对象列表。error为nil时该返回值有效。 -// -func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) { - var out ListObjectsResult - - options = append(options, EncodingType("url")) - params, err := handleParams(options) - if err != nil { - return out, err - } - - resp, err := bucket.do("GET", "", params, "", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return out, err - } - - err = decodeListObjectsResult(&out) - return out, err -} - -// -// SetObjectMeta 设置Object的Meta。 -// -// objectKey object -// options 
指定对象的属性,有以下可选项CacheControl、ContentDisposition、ContentEncoding、Expires、 -// ServerSideEncryption、Meta。 -// -// error 操作无错误时error为nil,非nil为错误信息。 -// -func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error { - options = append(options, MetadataDirective(MetaReplace)) - _, err := bucket.CopyObject(objectKey, objectKey, options...) - return err -} - -// -// GetObjectDetailedMeta 查询Object的头信息。 -// -// objectKey object名称。 -// objectPropertyConstraints 对象的属性限制项,满足时正常返回,不满足时返回错误。现在项有IfModifiedSince、IfUnmodifiedSince、 -// IfMatch、IfNoneMatch。具体含义请参看 https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html -// -// http.Header 对象的meta,error为nil时有效。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) { - resp, err := bucket.do("HEAD", objectKey, "", "", options, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return resp.Headers, nil -} - -// -// GetObjectMeta 查询Object的头信息。 -// -// GetObjectMeta相比GetObjectDetailedMeta更轻量,仅返回指定Object的少量基本meta信息, -// 包括该Object的ETag、Size(对象大小)、LastModified,其中Size由响应头Content-Length的数值表示。 -// -// objectKey object名称。 -// -// http.Header 对象的meta,error为nil时有效。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) { - resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return resp.Headers, nil -} - -// -// SetObjectACL 修改Object的ACL权限。 -// -// 只有Bucket Owner才有权限调用PutObjectACL来修改Object的ACL。Object ACL优先级高于Bucket ACL。 -// 例如Bucket ACL是private的,而Object ACL是public-read-write的,则访问这个Object时, -// 先判断Object的ACL,所以所有用户都拥有这个Object的访问权限,即使这个Bucket是private bucket。 -// 如果某个Object从来没设置过ACL,则访问权限遵循Bucket ACL。 -// -// Object的读操作包括GetObject,HeadObject,CopyObject和UploadPartCopy中的对source object的读; -// Object的写操作包括:PutObject,PostObject,AppendObject,DeleteObject, -// DeleteMultipleObjects,CompleteMultipartUpload以及CopyObject对新的Object的写。 -// -// objectKey 设置权限的object。 -// objectAcl 对象权限。可选值PrivateACL(私有读写)、PublicReadACL(公共读私有写)、PublicReadWriteACL(公共读写)。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error { - options := []Option{ObjectACL(objectACL)} - resp, err := bucket.do("PUT", objectKey, "acl", "acl", options, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// GetObjectACL 获取对象的ACL权限。 -// -// objectKey 获取权限的object。 -// -// GetObjectAclResponse 获取权限操作返回值,error为nil时有效。GetObjectAclResponse.Acl为对象的权限。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) { - var out GetObjectACLResult - resp, err := bucket.do("GET", objectKey, "acl", "acl", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// Private -func (bucket Bucket) do(method, objectName, urlParams, subResource string, - options []Option, data io.Reader) (*Response, error) { - headers := make(map[string]string) - err := handleOptions(headers, options) - if err != nil { - return nil, err - } - return bucket.Client.Conn.Do(method, bucket.BucketName, objectName, - urlParams, subResource, headers, data, 0) -} - -func (bucket Bucket) getConfig() *Config { - return bucket.Client.Config -} - -func addContentType(options []Option, keys ...string) []Option 
{ - typ := TypeByExtension("") - for _, key := range keys { - typ = TypeByExtension(key) - if typ != "" { - break - } - } - - if typ == "" { - typ = "application/octet-stream" - } - - opts := []Option{ContentType(typ)} - opts = append(opts, options...) - - return opts -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go deleted file mode 100755 index 7480d33..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go +++ /dev/null @@ -1,739 +0,0 @@ -// Package oss implements functions for access oss service. -// It has two main struct Client and Bucket. -package oss - -import ( - "bytes" - "encoding/xml" - "io" - "net/http" - "strings" - "time" -) - -// -// Client Sdk的入口,Client的方法可以完成bucket的各种操作,如create/delete bucket, -// set/get acl/lifecycle/referer/logging/website等。文件(object)的上传下载通过Bucket完成。 -// 用户用oss.New创建Client。 -// -type ( - // Client oss client - Client struct { - Config *Config // Oss Client configure - Conn *Conn // Send http request - } - - // ClientOption client option such as UseCname, Timeout, SecurityToken. - ClientOption func(*Client) -) - -// -// New 生成一个新的Client。 -// -// endpoint 用户Bucket所在数据中心的访问域名,如http://oss-cn-hangzhou.aliyuncs.com。 -// accessKeyId 用户标识。 -// accessKeySecret 用户密钥。 -// -// Client 生成的新Client。error为nil时有效。 -// error 操作无错误时为nil,非nil时表示操作出错。 -// -func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) { - config := getDefaultOssConfig() - config.Endpoint = endpoint - config.AccessKeyID = accessKeyID - config.AccessKeySecret = accessKeySecret - - url := &urlMaker{} - url.Init(config.Endpoint, config.IsCname, config.IsUseProxy) - conn := &Conn{config, url} - - client := &Client{ - config, - conn, - } - - for _, option := range options { - option(client) - } - - return client, nil -} - -// -// Bucket 取存储空间(Bucket)的对象实例。 -// -// bucketName 存储空间名称。 -// Bucket 新的Bucket。error为nil时有效。 -// -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) Bucket(bucketName string) (*Bucket, error) { - return &Bucket{ - client, - bucketName, - }, nil -} - -// -// CreateBucket 创建Bucket。 -// -// bucketName bucket名称,在整个OSS中具有全局唯一性,且不能修改。bucket名称的只能包括小写字母,数字和短横线-, -// 必须以小写字母或者数字开头,长度必须在3-255字节之间。 -// options 创建bucket的选项。您可以使用选项ACL,指定bucket的访问权限。Bucket有以下三种访问权限,私有读写(ACLPrivate)、 -// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite),默认访问权限是私有读写。 -// -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) CreateBucket(bucketName string, options ...Option) error { - headers := make(map[string]string) - handleOptions(headers, options) - - resp, err := client.do("PUT", bucketName, "", "", headers, nil) - if err != nil { - return err - } - - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// ListBuckets 获取当前用户下的bucket。 -// -// options 指定ListBuckets的筛选行为,Prefix、Marker、MaxKeys三个选项。Prefix限定前缀。 -// Marker设定从Marker之后的第一个开始返回。MaxKeys限定此次返回的最大数目,默认为100。 -// 常用使用场景的实现,参数示例程序list_bucket.go。 -// ListBucketsResponse 操作成功后的返回值,error为nil时该返回值有效。 -// -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) { - var out ListBucketsResult - - params, err := handleParams(options) - if err != nil { - return out, err - } - - resp, err := client.do("GET", "", params, "", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// IsBucketExist Bucket是否存在。 -// -// bucketName 存储空间名称。 
-// -// bool 存储空间是否存在。error为nil时有效。 -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) IsBucketExist(bucketName string) (bool, error) { - listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1)) - if err != nil { - return false, err - } - - if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName { - return true, nil - } - return false, nil -} - -// -// DeleteBucket 删除空存储空间。非空时请先清理Object、Upload。 -// -// bucketName 存储空间名称。 -// -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) DeleteBucket(bucketName string) error { - resp, err := client.do("DELETE", bucketName, "", "", nil, nil) - if err != nil { - return err - } - - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// GetBucketLocation 查看Bucket所属数据中心位置的信息。 -// -// 如果您想了解"访问域名和数据中心"详细信息,请参看 -// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html -// -// bucketName 存储空间名称。 -// -// string Bucket所属的数据中心位置信息。 -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) GetBucketLocation(bucketName string) (string, error) { - resp, err := client.do("GET", bucketName, "location", "location", nil, nil) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var LocationConstraint string - err = xmlUnmarshal(resp.Body, &LocationConstraint) - return LocationConstraint, err -} - -// -// SetBucketACL 修改Bucket的访问权限。 -// -// bucketName 存储空间名称。 -// bucketAcl bucket的访问权限。Bucket有以下三种访问权限,Bucket有以下三种访问权限,私有读写(ACLPrivate)、 -// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite)。 -// -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error { - headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)} - resp, err := client.do("PUT", bucketName, "", "", headers, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// GetBucketACL 获得Bucket的访问权限。 -// -// bucketName 存储空间名称。 -// -// GetBucketAclResponse 操作成功后的返回值,error为nil时该返回值有效。 -// error 操作无错误时返回nil,非nil为错误信息。 -// -func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) { - var out GetBucketACLResult - resp, err := client.do("GET", bucketName, "acl", "acl", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// SetBucketLifecycle 修改Bucket的生命周期设置。 -// -// OSS提供Object生命周期管理来为用户管理对象。用户可以为某个Bucket定义生命周期配置,来为该Bucket的Object定义各种规则。 -// Bucket的拥有者可以通过SetBucketLifecycle来设置Bucket的Lifecycle配置。Lifecycle开启后,OSS将按照配置, -// 定期自动删除与Lifecycle规则相匹配的Object。如果您想了解更多的生命周期的信息,请参看 -// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html -// -// bucketName 存储空间名称。 -// rules 生命周期规则列表。生命周期规则有两种格式,指定绝对和相对过期时间,分布由days和year/month/day控制。 -// 具体用法请参考示例程序sample/bucket_lifecycle.go。 -// -// error 操作无错误时返回error为nil,非nil为错误信息。 -// -func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error { - lxml := lifecycleXML{Rules: convLifecycleRule(rules)} - bs, err := xml.Marshal(lxml) - if err != nil { - return err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - headers := map[string]string{} - headers[HTTPHeaderContentType] = contentType - - resp, err := client.do("PUT", bucketName, "lifecycle", "lifecycle", headers, buffer) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, 
[]int{http.StatusOK}) -} - -// -// DeleteBucketLifecycle 删除Bucket的生命周期设置。 -// -// -// bucketName 存储空间名称。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) DeleteBucketLifecycle(bucketName string) error { - resp, err := client.do("DELETE", bucketName, "lifecycle", "lifecycle", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// GetBucketLifecycle 查看Bucket的生命周期设置。 -// -// bucketName 存储空间名称。 -// -// GetBucketLifecycleResponse 操作成功的返回值,error为nil时该返回值有效。Rules为该bucket上的规则列表。 -// error 操作无错误时为nil,非nil为错误信息。 -// -func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) { - var out GetBucketLifecycleResult - resp, err := client.do("GET", bucketName, "lifecycle", "lifecycle", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// SetBucketReferer 设置bucket的referer访问白名单和是否允许referer字段为空的请求访问。 -// -// 防止用户在OSS上的数据被其他人盗用,OSS支持基于HTTP header中表头字段referer的防盗链方法。可以通过OSS控制台或者API的方式对 -// 一个bucket设置referer字段的白名单和是否允许referer字段为空的请求访问。例如,对于一个名为oss-example的bucket, -// 设置其referer白名单为http://www.aliyun.com。则所有referer为http://www.aliyun.com的请求才能访问oss-example -// 这个bucket中的object。如果您还需要了解更多信息,请参看 -// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html -// -// bucketName 存储空间名称。 -// referers 访问白名单列表。一个bucket可以支持多个referer参数。referer参数支持通配符"*"和"?"。 -// 用法请参看示例sample/bucket_referer.go -// allowEmptyReferer 指定是否允许referer字段为空的请求访问。 默认为true。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error { - rxml := RefererXML{} - rxml.AllowEmptyReferer = allowEmptyReferer - if referers == nil { - rxml.RefererList = append(rxml.RefererList, "") - } else { - for _, referer := range referers { - rxml.RefererList = append(rxml.RefererList, referer) - } - } - - bs, err := xml.Marshal(rxml) - if err != nil { - return err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - headers := map[string]string{} - headers[HTTPHeaderContentType] = contentType - - resp, err := client.do("PUT", bucketName, "referer", "referer", headers, buffer) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// GetBucketReferer 获得Bucket的白名单地址。 -// -// bucketName 存储空间名称。 -// -// GetBucketRefererResponse 操作成功的返回值,error为nil时该返回值有效。 -// error 操作无错误时为nil,非nil为错误信息。 -// -func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) { - var out GetBucketRefererResult - resp, err := client.do("GET", bucketName, "referer", "referer", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// SetBucketLogging 修改Bucket的日志设置。 -// -// OSS为您提供自动保存访问日志记录功能。Bucket的拥有者可以开启访问日志记录功能。当一个bucket开启访问日志记录功能后, -// OSS自动将访问这个bucket的请求日志,以小时为单位,按照固定的命名规则,生成一个Object写入用户指定的bucket中。 -// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html -// -// bucketName 需要记录访问日志的Bucket。 -// targetBucket 访问日志记录到的Bucket。 -// targetPrefix bucketName中需要存储访问日志记录的object前缀。为空记录所有object的访问日志。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string, - isEnable bool) error { - var err error - 
var bs []byte - if isEnable { - lxml := LoggingXML{} - lxml.LoggingEnabled.TargetBucket = targetBucket - lxml.LoggingEnabled.TargetPrefix = targetPrefix - bs, err = xml.Marshal(lxml) - } else { - lxml := loggingXMLEmpty{} - bs, err = xml.Marshal(lxml) - } - - if err != nil { - return err - } - - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - headers := map[string]string{} - headers[HTTPHeaderContentType] = contentType - - resp, err := client.do("PUT", bucketName, "logging", "logging", headers, buffer) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// DeleteBucketLogging 删除Bucket的日志设置。 -// -// bucketName 需要删除访问日志的Bucket。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) DeleteBucketLogging(bucketName string) error { - resp, err := client.do("DELETE", bucketName, "logging", "logging", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// GetBucketLogging 获得Bucket的日志设置。 -// -// bucketName 需要删除访问日志的Bucket。 -// GetBucketLoggingResponse 操作成功的返回值,error为nil时该返回值有效。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) { - var out GetBucketLoggingResult - resp, err := client.do("GET", bucketName, "logging", "logging", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// SetBucketWebsite 设置/修改Bucket的默认首页以及错误页。 -// -// OSS支持静态网站托管,Website操作可以将一个bucket设置成静态网站托管模式 。您可以将自己的Bucket配置成静态网站托管模式。 -// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html -// -// bucketName 需要设置Website的Bucket。 -// indexDocument 索引文档。 -// errorDocument 错误文档。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error { - wxml := WebsiteXML{} - wxml.IndexDocument.Suffix = indexDocument - wxml.ErrorDocument.Key = errorDocument - - bs, err := xml.Marshal(wxml) - if err != nil { - return err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - headers := make(map[string]string) - headers[HTTPHeaderContentType] = contentType - - resp, err := client.do("PUT", bucketName, "website", "website", headers, buffer) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// DeleteBucketWebsite 删除Bucket的Website设置。 -// -// bucketName 需要删除website设置的Bucket。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) DeleteBucketWebsite(bucketName string) error { - resp, err := client.do("DELETE", bucketName, "website", "website", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// GetBucketWebsite 获得Bucket的默认首页以及错误页。 -// -// bucketName 存储空间名称。 -// -// GetBucketWebsiteResponse 操作成功的返回值,error为nil时该返回值有效。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) { - var out GetBucketWebsiteResult - resp, err := client.do("GET", bucketName, "website", "website", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// SetBucketCORS 
设置Bucket的跨域访问(CORS)规则。 -// -// 跨域访问的更多信息,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html -// -// bucketName 需要设置Website的Bucket。 -// corsRules 待设置的CORS规则。用法请参看示例代码sample/bucket_cors.go。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error { - corsxml := CORSXML{} - for _, v := range corsRules { - cr := CORSRule{} - cr.AllowedMethod = v.AllowedMethod - cr.AllowedOrigin = v.AllowedOrigin - cr.AllowedHeader = v.AllowedHeader - cr.ExposeHeader = v.ExposeHeader - cr.MaxAgeSeconds = v.MaxAgeSeconds - corsxml.CORSRules = append(corsxml.CORSRules, cr) - } - - bs, err := xml.Marshal(corsxml) - if err != nil { - return err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - contentType := http.DetectContentType(buffer.Bytes()) - headers := map[string]string{} - headers[HTTPHeaderContentType] = contentType - - resp, err := client.do("PUT", bucketName, "cors", "cors", headers, buffer) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusOK}) -} - -// -// DeleteBucketCORS 删除Bucket的Website设置。 -// -// bucketName 需要删除cors设置的Bucket。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) DeleteBucketCORS(bucketName string) error { - resp, err := client.do("DELETE", bucketName, "cors", "cors", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// GetBucketCORS 获得Bucket的CORS设置。 -// -// -// bucketName 存储空间名称。 -// GetBucketCORSResult 操作成功的返回值,error为nil时该返回值有效。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) { - var out GetBucketCORSResult - resp, err := client.do("GET", bucketName, "cors", "cors", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// GetBucketInfo 获得Bucket的信息。 -// -// bucketName 存储空间名称。 -// GetBucketInfoResult 操作成功的返回值,error为nil时该返回值有效。 -// -// error 操作无错误为nil,非nil为错误信息。 -// -func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) { - var out GetBucketInfoResult - resp, err := client.do("GET", bucketName, "bucketInfo", "bucketInfo", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// UseCname 设置是否使用CNAME,默认不使用。 -// -// isUseCname true设置endpoint格式是cname格式,false为非cname格式,默认false -// -func UseCname(isUseCname bool) ClientOption { - return func(client *Client) { - client.Config.IsCname = isUseCname - client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) - } -} - -// -// Timeout 设置HTTP超时时间。 -// -// connectTimeoutSec HTTP链接超时时间,单位是秒,默认10秒。0表示永不超时。 -// readWriteTimeout HTTP发送接受数据超时时间,单位是秒,默认20秒。0表示永不超时。 -// -func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption { - return func(client *Client) { - client.Config.HTTPTimeout.ConnectTimeout = - time.Second * time.Duration(connectTimeoutSec) - client.Config.HTTPTimeout.ReadWriteTimeout = - time.Second * time.Duration(readWriteTimeout) - client.Config.HTTPTimeout.HeaderTimeout = - time.Second * time.Duration(readWriteTimeout) - client.Config.HTTPTimeout.LongTimeout = - time.Second * time.Duration(readWriteTimeout*10) - } -} - -// -// SecurityToken 临时用户设置SecurityToken。 -// -// token STS token -// -func SecurityToken(token string) 
ClientOption { - return func(client *Client) { - client.Config.SecurityToken = strings.TrimSpace(token) - } -} - -// -// EnableMD5 是否启用MD5校验,默认启用。 -// -// isEnableMD5 true启用MD5校验,false不启用MD5校验 -// -func EnableMD5(isEnableMD5 bool) ClientOption { - return func(client *Client) { - client.Config.IsEnableMD5 = isEnableMD5 - } -} - -// -// MD5ThresholdCalcInMemory 使用内存计算MD5值的上限,默认16MB。 -// -// threshold 单位Byte。上传内容小于threshold在MD5在内存中计算,大于使用临时文件计算MD5 -// -func MD5ThresholdCalcInMemory(threshold int64) ClientOption { - return func(client *Client) { - client.Config.MD5Threshold = threshold - } -} - -// -// EnableCRC 上传是否启用CRC校验,默认启用。 -// -// isEnableCRC true启用CRC校验,false不启用CRC校验 -// -func EnableCRC(isEnableCRC bool) ClientOption { - return func(client *Client) { - client.Config.IsEnableCRC = isEnableCRC - } -} - -// -// UserAgent 指定UserAgent,默认如下aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2)。 -// -// userAgent user agent字符串。 -// -func UserAgent(userAgent string) ClientOption { - return func(client *Client) { - client.Config.UserAgent = userAgent - } -} - -// -// Proxy 设置代理服务器,默认不使用代理。 -// -// proxyHost 代理服务器地址,格式是host或host:port -// -func Proxy(proxyHost string) ClientOption { - return func(client *Client) { - client.Config.IsUseProxy = true - client.Config.ProxyHost = proxyHost - client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) - } -} - -// -// AuthProxy 设置需要认证的代理服务器,默认不使用代理。 -// -// proxyHost 代理服务器地址,格式是host或host:port -// proxyUser 代理服务器认证的用户名 -// proxyPassword 代理服务器认证的用户密码 -// -func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption { - return func(client *Client) { - client.Config.IsUseProxy = true - client.Config.ProxyHost = proxyHost - client.Config.IsAuthProxy = true - client.Config.ProxyUser = proxyUser - client.Config.ProxyPassword = proxyPassword - client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) - } -} - -// Private -func (client Client) do(method, bucketName, urlParams, subResource string, - headers map[string]string, data io.Reader) (*Response, error) { - return client.Conn.Do(method, bucketName, "", urlParams, - subResource, headers, data, 0) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go deleted file mode 100755 index 4ab81cb..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go +++ /dev/null @@ -1,67 +0,0 @@ -package oss - -import ( - "time" -) - -// HTTPTimeout http timeout -type HTTPTimeout struct { - ConnectTimeout time.Duration - ReadWriteTimeout time.Duration - HeaderTimeout time.Duration - LongTimeout time.Duration -} - -// Config oss configure -type Config struct { - Endpoint string // oss地址 - AccessKeyID string // accessId - AccessKeySecret string // accessKey - RetryTimes uint // 失败重试次数,默认5 - UserAgent string // SDK名称/版本/系统信息 - IsDebug bool // 是否开启调试模式,默认false - Timeout uint // 超时时间,默认60s - SecurityToken string // STS Token - IsCname bool // Endpoint是否是CNAME - HTTPTimeout HTTPTimeout // HTTP的超时时间设置 - IsUseProxy bool // 是否使用代理 - ProxyHost string // 代理服务器地址 - IsAuthProxy bool // 代理服务器是否使用用户认证 - ProxyUser string // 代理服务器认证用户名 - ProxyPassword string // 代理服务器认证密码 - IsEnableMD5 bool // 上传数据时是否启用MD5校验 - MD5Threshold int64 // 内存中计算MD5的上线大小,大于该值启用临时文件,单位Byte - IsEnableCRC bool // 上传数据时是否启用CRC64校验 -} - -// 获取默认配置 -func getDefaultOssConfig() *Config { - config := Config{} - - config.Endpoint = "" - config.AccessKeyID = "" - config.AccessKeySecret = "" - config.RetryTimes = 5 - 
config.IsDebug = false - config.UserAgent = userAgent - config.Timeout = 60 // seconds - config.SecurityToken = "" - config.IsCname = false - - config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s - config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s - config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s - config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s - - config.IsUseProxy = false - config.ProxyHost = "" - config.IsAuthProxy = false - config.ProxyUser = "" - config.ProxyPassword = "" - - config.MD5Threshold = 16 * 1024 * 1024 // 16MB - config.IsEnableMD5 = false - config.IsEnableCRC = true - - return &config -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go deleted file mode 100755 index db99c45..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go +++ /dev/null @@ -1,420 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/xml" - "fmt" - "hash" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" -) - -// Conn oss conn -type Conn struct { - config *Config - url *urlMaker -} - -// Do 处理请求,返回响应结果。 -func (conn Conn) Do(method, bucketName, objectName, urlParams, subResource string, - headers map[string]string, data io.Reader, initCRC uint64) (*Response, error) { - uri := conn.url.getURL(bucketName, objectName, urlParams) - resource := conn.url.getResource(bucketName, objectName, subResource) - return conn.doRequest(method, uri, resource, headers, data, initCRC) -} - -func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, - headers map[string]string, data io.Reader, initCRC uint64) (*Response, error) { - httpTimeOut := conn.config.HTTPTimeout - method = strings.ToUpper(method) - if !conn.config.IsUseProxy { - uri.Opaque = uri.Path - } - req := &http.Request{ - Method: method, - URL: uri, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: uri.Host, - } - - fd, crc := conn.handleBody(req, data, initCRC) - if fd != nil { - defer func() { - fd.Close() - os.Remove(fd.Name()) - }() - } - - date := time.Now().UTC().Format(http.TimeFormat) - req.Header.Set(HTTPHeaderDate, date) - req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) - req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) - if conn.config.SecurityToken != "" { - req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken) - } - - if headers != nil { - for k, v := range headers { - req.Header.Set(k, v) - } - } - - conn.signHeader(req, canonicalizedResource) - - var transport *http.Transport - if conn.config.IsUseProxy { - // proxy - proxyURL, err := url.Parse(conn.config.ProxyHost) - if err != nil { - return nil, err - } - - transport = &http.Transport{ - Proxy: http.ProxyURL(proxyURL), - Dial: func(netw, addr string) (net.Conn, error) { - conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) - if err != nil { - return nil, err - } - return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil - }, - ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, - MaxIdleConnsPerHost: 2000, - } - - if conn.config.IsAuthProxy { - auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword - basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) - req.Header.Set("Proxy-Authorization", basic) - } - } else { - // no proxy - transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - 
conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) - if err != nil { - return nil, err - } - return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil - }, - ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, - MaxIdleConnsPerHost: 2000, - } - } - - timeoutClient := &http.Client{Transport: transport} - - resp, err := timeoutClient.Do(req) - if err != nil { - return nil, err - } - - return conn.handleResponse(resp, crc) -} - -// handle request body -func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64) (*os.File, hash.Hash64) { - var file *os.File - var crc hash.Hash64 - reader := body - - // length - switch v := body.(type) { - case *bytes.Buffer: - req.ContentLength = int64(v.Len()) - case *bytes.Reader: - req.ContentLength = int64(v.Len()) - case *strings.Reader: - req.ContentLength = int64(v.Len()) - case *os.File: - req.ContentLength = tryGetFileSize(v) - } - req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) - - // md5 - if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { - if req.ContentLength == 0 || req.ContentLength > conn.config.MD5Threshold { - // huge body, use temporary file - file, _ = ioutil.TempFile(os.TempDir(), TempFilePrefix) - if file != nil { - io.Copy(file, body) - file.Seek(0, os.SEEK_SET) - md5 := md5.New() - io.Copy(md5, file) - sum := md5.Sum(nil) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - req.Header.Set(HTTPHeaderContentMD5, b64) - file.Seek(0, os.SEEK_SET) - reader = file - } - } else { - // small body, use memory - buf, _ := ioutil.ReadAll(body) - sum := md5.Sum(buf) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - req.Header.Set(HTTPHeaderContentMD5, b64) - reader = bytes.NewReader(buf) - } - } - - if reader != nil && conn.config.IsEnableCRC { - crc = NewCRC(crcTable(), initCRC) - reader = io.TeeReader(reader, crc) - } - - rc, ok := reader.(io.ReadCloser) - if !ok && reader != nil { - rc = ioutil.NopCloser(reader) - } - req.Body = rc - - return file, crc -} - -func tryGetFileSize(f *os.File) int64 { - fInfo, _ := f.Stat() - return fInfo.Size() -} - -// handle response -func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) { - var cliCRC uint64 - var srvCRC uint64 - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - // 4xx and 5xx indicate that the operation has error occurred - var respBody []byte - respBody, err := readResponseBody(resp) - if err != nil { - return nil, err - } - - if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status) - } else { - // response contains storage service error object, unmarshal - srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, - resp.Header.Get(HTTPHeaderOssRequestID)) - if err != nil { // error unmarshaling the error response - err = errIn - } - err = srvErr - } - return &Response{ - StatusCode: resp.StatusCode, - Headers: resp.Header, - Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body - }, err - } else if statusCode >= 300 && statusCode <= 307 { - // oss use 3xx, but response has no body - err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status) - return &Response{ - StatusCode: resp.StatusCode, - Headers: resp.Header, - Body: resp.Body, - }, err - } - - if conn.config.IsEnableCRC && crc != nil { - cliCRC = crc.Sum64() - } - srvCRC, _ = 
strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64) - - // 2xx, successful - return &Response{ - StatusCode: resp.StatusCode, - Headers: resp.Header, - Body: resp.Body, - ClientCRC: cliCRC, - ServerCRC: srvCRC, - }, nil -} - -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) { - var storageErr ServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return storageErr, err - } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - storageErr.RawMessage = string(body) - return storageErr, nil -} - -func xmlUnmarshal(body io.Reader, v interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -// Handle http timeout -type timeoutConn struct { - conn net.Conn - timeout time.Duration - longTimeout time.Duration -} - -func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { - conn.SetReadDeadline(time.Now().Add(longTimeout)) - return &timeoutConn{ - conn: conn, - timeout: timeout, - longTimeout: longTimeout, - } -} - -func (c *timeoutConn) Read(b []byte) (n int, err error) { - c.SetReadDeadline(time.Now().Add(c.timeout)) - n, err = c.conn.Read(b) - c.SetReadDeadline(time.Now().Add(c.longTimeout)) - return n, err -} - -func (c *timeoutConn) Write(b []byte) (n int, err error) { - c.SetWriteDeadline(time.Now().Add(c.timeout)) - n, err = c.conn.Write(b) - c.SetReadDeadline(time.Now().Add(c.longTimeout)) - return n, err -} - -func (c *timeoutConn) Close() error { - return c.conn.Close() -} - -func (c *timeoutConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *timeoutConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *timeoutConn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -func (c *timeoutConn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -func (c *timeoutConn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -// UrlMaker - build url and resource -const ( - urlTypeCname = 1 - urlTypeIP = 2 - urlTypeAliyun = 3 -) - -type urlMaker struct { - Scheme string // http or https - NetLoc string // host or ip - Type int // 1 CNAME 2 IP 3 ALIYUN - IsProxy bool // proxy -} - -// Parse endpoint -func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) { - if strings.HasPrefix(endpoint, "http://") { - um.Scheme = "http" - um.NetLoc = endpoint[len("http://"):] - } else if strings.HasPrefix(endpoint, "https://") { - um.Scheme = "https" - um.NetLoc = endpoint[len("https://"):] - } else { - um.Scheme = "http" - um.NetLoc = endpoint - } - - host, _, err := net.SplitHostPort(um.NetLoc) - if err != nil { - host = um.NetLoc - } - ip := net.ParseIP(host) - if ip != nil { - um.Type = urlTypeIP - } else if isCname { - um.Type = urlTypeCname - } else { - um.Type = urlTypeAliyun - } - um.IsProxy = isProxy -} - -// Build URL -func (um urlMaker) getURL(bucket, object, params string) *url.URL { - var host = "" - var path = "" - - if !um.IsProxy { - object = url.QueryEscape(object) - } - - if um.Type == urlTypeCname { - host = um.NetLoc - path = "/" + object - } else if um.Type == urlTypeIP { - if bucket == "" { - host = um.NetLoc - path = "/" - } else { - host = um.NetLoc - path = fmt.Sprintf("/%s/%s", 
bucket, object) - } - } else { - if bucket == "" { - host = um.NetLoc - path = "/" - } else { - host = bucket + "." + um.NetLoc - path = "/" + object - } - } - - uri := &url.URL{ - Scheme: um.Scheme, - Host: host, - Path: path, - RawQuery: params, - } - - return uri -} - -// Canonicalized Resource -func (um urlMaker) getResource(bucketName, objectName, subResource string) string { - if subResource != "" { - subResource = "?" + subResource - } - if bucketName == "" { - return fmt.Sprintf("/%s%s", bucketName, subResource) - } - return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go deleted file mode 100755 index 3f4f3db..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go +++ /dev/null @@ -1,82 +0,0 @@ -package oss - -// ACLType Bucket/Object的访问控制 -type ACLType string - -const ( - // ACLPrivate 私有读写 - ACLPrivate ACLType = "private" - - // ACLPublicRead 公共读私有写 - ACLPublicRead ACLType = "tools-read" - - // ACLPublicReadWrite 公共读写 - ACLPublicReadWrite ACLType = "tools-read-write" - - // ACLDefault Object默认权限,Bucket无此权限 - ACLDefault ACLType = "default" -) - -// MetadataDirectiveType 对象COPY时新对象是否使用原对象的Meta -type MetadataDirectiveType string - -const ( - // MetaCopy 目标对象使用源对象的META - MetaCopy MetadataDirectiveType = "COPY" - - // MetaReplace 目标对象使用自定义的META - MetaReplace MetadataDirectiveType = "REPLACE" -) - -// Http头标签 -const ( - HTTPHeaderAcceptEncoding string = "Accept-Encoding" - HTTPHeaderAuthorization = "Authorization" - HTTPHeaderCacheControl = "Cache-Control" - HTTPHeaderContentDisposition = "Content-Disposition" - HTTPHeaderContentEncoding = "Content-Encoding" - HTTPHeaderContentLength = "Content-Length" - HTTPHeaderContentMD5 = "Content-MD5" - HTTPHeaderContentType = "Content-Type" - HTTPHeaderContentLanguage = "Content-Language" - HTTPHeaderDate = "Date" - HTTPHeaderEtag = "ETag" - HTTPHeaderExpires = "Expires" - HTTPHeaderHost = "Host" - HTTPHeaderLastModified = "Last-Modified" - HTTPHeaderRange = "Range" - HTTPHeaderLocation = "Location" - HTTPHeaderOrigin = "Origin" - HTTPHeaderServer = "Server" - HTTPHeaderUserAgent = "User-Agent" - HTTPHeaderIfModifiedSince = "If-Modified-Since" - HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" - HTTPHeaderIfMatch = "If-Match" - HTTPHeaderIfNoneMatch = "If-None-Match" - - HTTPHeaderOssACL = "X-Oss-Acl" - HTTPHeaderOssMetaPrefix = "X-Oss-Meta-" - HTTPHeaderOssObjectACL = "X-Oss-Object-Acl" - HTTPHeaderOssSecurityToken = "X-Oss-Security-Token" - HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" - HTTPHeaderOssCopySource = "X-Oss-Copy-Source" - HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" - HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" - HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" - HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" - HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" - HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive" - HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" - HTTPHeaderOssRequestID = "X-Oss-Request-Id" - HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" -) - -// 其它常量 -const ( - MaxPartSize = 5 * 1024 * 1024 * 1024 // 文件片最大值,5GB - MinPartSize = 100 * 1024 // 文件片最小值,100KB - - TempFilePrefix = "oss-go-temp-" // 临时文件前缀 - - Version = "1.2.1" // Go sdk版本 -) diff --git 
a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go deleted file mode 100755 index c2715fd..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go +++ /dev/null @@ -1,44 +0,0 @@ -package oss - -import ( - "hash" - "hash/crc64" -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - crc uint64 - tab *crc64.Table -} - -// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum -// using the polynomial represented by the Table. -func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } - -// Size returns the number of bytes Sum will return. -func (d *digest) Size() int { return crc64.Size } - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (d *digest) BlockSize() int { return 1 } - -// Reset resets the Hash to its initial state. -func (d *digest) Reset() { d.crc = 0 } - -// Write (via the embedded io.Writer interface) adds more data to the running hash. -// It never returns an error. -func (d *digest) Write(p []byte) (n int, err error) { - d.crc = crc64.Update(d.crc, d.tab, p) - return len(p), nil -} - -// Sum64 returns crc64 value. -func (d *digest) Sum64() uint64 { return d.crc } - -// Sum returns hash value. -func (d *digest) Sum(in []byte) []byte { - s := d.Sum64() - return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go deleted file mode 100755 index e0a213d..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go +++ /dev/null @@ -1,399 +0,0 @@ -package oss - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "io" - "io/ioutil" - "os" - "strconv" -) - -// -// DownloadFile 分片下载文件 -// -// objectKey object key。 -// filePath 本地文件。objectKey下载到文件。 -// partSize 本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。 -// options Object的属性限制项。详见GetObject。 -// -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { - if partSize < 1 || partSize > MaxPartSize { - return errors.New("oss: part size invalid range (1, 5GB]") - } - - cpConf, err := getCpConfig(options, filePath) - if err != nil { - return err - } - - routines := getRoutines(options) - - if cpConf.IsEnable { - return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines) - } - - return bucket.downloadFile(objectKey, filePath, partSize, options, routines) -} - -// ----- 并发无断点的下载 ----- - -// 工作协程参数 -type downloadWorkerArg struct { - bucket *Bucket - key string - filePath string - options []Option - hook downloadPartHook -} - -// Hook用于测试 -type downloadPartHook func(part downloadPart) error - -var downloadPartHooker downloadPartHook = defaultDownloadPartHook - -func defaultDownloadPartHook(part downloadPart) error { - return nil -} - -// 工作协程 -func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <- chan bool) { - for part := range jobs { - if err := arg.hook(part); err != nil { - failed <- err - break - } - - opt := Range(part.Start, part.End) - opts := append(arg.options, opt) - rd, err := 
arg.bucket.GetObject(arg.key, opts...) - if err != nil { - failed <- err - break - } - defer rd.Close() - - select { - case <-die: - return - default: - } - - fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, 0660) - if err != nil { - failed <- err - break - } - defer fd.Close() - - _, err = fd.Seek(part.Start, os.SEEK_SET) - if err != nil { - failed <- err - break - } - - _, err = io.Copy(fd, rd) - if err != nil { - failed <- err - break - } - - results <- part - } -} - -// 调度协程 -func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { - for _, part := range parts { - jobs <- part - } - close(jobs) -} - -// 下载片 -type downloadPart struct { - Index int // 片序号,从0开始编号 - Start int64 // 片起始位置 - End int64 // 片结束位置 -} - -// 文件分片 -func getDownloadParts(bucket *Bucket, objectKey string, partSize int64) ([]downloadPart, error) { - meta, err := bucket.GetObjectDetailedMeta(objectKey) - if err != nil { - return nil, err - } - - parts := []downloadPart{} - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return nil, err - } - - part := downloadPart{} - i := 0 - for offset := int64(0); offset < objectSize; offset += partSize { - part.Index = i - part.Start = offset - part.End = GetPartEnd(offset, objectSize, partSize) - parts = append(parts, part) - i++ - } - return parts, nil -} - -// 并发无断点续传的下载 -func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { - // 如果文件不存在则创建,存在不清空,下载分片会重写文件内容 - fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0660) - if err != nil { - return err - } - fd.Close() - - // 分割文件 - parts, err := getDownloadParts(&bucket, objectKey, partSize) - if err != nil { - return err - } - - jobs := make(chan downloadPart, len(parts)) - results := make(chan downloadPart, len(parts)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := downloadWorkerArg{&bucket, objectKey, filePath, options, downloadPartHooker} - for w := 1; w <= routines; w++ { - go downloadWorker(w, arg, jobs, results, failed, die) - } - - // 并发上传分片 - go downloadScheduler(jobs, parts) - - // 等待分片下载完成 - completed := 0 - ps := make([]downloadPart, len(parts)) - for completed < len(parts) { - select { - case part := <-results: - completed++ - ps[part.Index] = part - case err := <-failed: - close(die) - return err - } - - if completed >= len(parts) { - break - } - } - - return nil -} - -// ----- 并发有断点的下载 ----- - -const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3" - -type downloadCheckpoint struct { - Magic string // magic - MD5 string // cp内容的MD5 - FilePath string // 本地文件 - Object string // key - ObjStat objectStat // 文件状态 - Parts []downloadPart // 全部分片 - PartStat []bool // 分片下载是否完成 -} - -type objectStat struct { - Size int64 // 大小 - LastModified string // 最后修改时间 - Etag string // etag -} - -// CP数据是否有效,CP有效且Object没有更新时有效 -func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) { - // 比较CP的Magic及MD5 - cpb := cp - cpb.MD5 = "" - js, _ := json.Marshal(cpb) - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - - if cp.Magic != downloadCpMagic || b64 != cp.MD5 { - return false, nil - } - - // 确认object没有更新 - meta, err := bucket.GetObjectDetailedMeta(objectKey) - if err != nil { - return false, err - } - - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return false, err - } - - // 比较Object的大小/最后修改时间/etag - if cp.ObjStat.Size != objectSize || - cp.ObjStat.LastModified != 
meta.Get(HTTPHeaderLastModified) || - cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { - return false, nil - } - - return true, nil -} - -// 从文件中load -func (cp *downloadCheckpoint) load(filePath string) error { - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - err = json.Unmarshal(contents, cp) - return err -} - -// dump到文件 -func (cp *downloadCheckpoint) dump(filePath string) error { - bcp := *cp - - // 计算MD5 - bcp.MD5 = "" - js, err := json.Marshal(bcp) - if err != nil { - return err - } - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - bcp.MD5 = b64 - - // 序列化 - js, err = json.Marshal(bcp) - if err != nil { - return err - } - - // dump - return ioutil.WriteFile(filePath, js, 0644) -} - -// 未完成的分片 -func (cp downloadCheckpoint) todoParts() []downloadPart { - dps := []downloadPart{} - for i, ps := range cp.PartStat { - if !ps { - dps = append(dps, cp.Parts[i]) - } - } - return dps -} - -// 初始化下载任务 -func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64) error { - // cp - cp.Magic = downloadCpMagic - cp.FilePath = filePath - cp.Object = objectKey - - // object - meta, err := bucket.GetObjectDetailedMeta(objectKey) - if err != nil { - return err - } - - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return err - } - - cp.ObjStat.Size = objectSize - cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) - cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) - - // parts - cp.Parts, err = getDownloadParts(bucket, objectKey, partSize) - if err != nil { - return err - } - cp.PartStat = make([]bool, len(cp.Parts)) - for i := range cp.PartStat { - cp.PartStat[i] = false - } - - return nil -} - -func (cp *downloadCheckpoint) complete(cpFilePath string) error { - os.Remove(cpFilePath) - return nil -} - -// 并发带断点的下载 -func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error { - // LOAD CP数据 - dcp := downloadCheckpoint{} - err := dcp.load(cpFilePath) - if err != nil { - os.Remove(cpFilePath) - } - - // LOAD出错或数据无效重新初始化下载 - valid, err := dcp.isValid(&bucket, objectKey) - if err != nil || !valid { - if err = dcp.prepare(&bucket, objectKey, filePath, partSize); err != nil { - return err - } - os.Remove(cpFilePath) - } - - // 如果文件不存在则创建,存在不清空,下载分片会重写文件内容 - fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0660) - if err != nil { - return err - } - fd.Close() - - // 未完成的分片 - parts := dcp.todoParts() - jobs := make(chan downloadPart, len(parts)) - results := make(chan downloadPart, len(parts)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := downloadWorkerArg{&bucket, objectKey, filePath, options, downloadPartHooker} - for w := 1; w <= routines; w++ { - go downloadWorker(w, arg, jobs, results, failed, die) - } - - // 并发下载分片 - go downloadScheduler(jobs, parts) - - // 等待分片下载完成 - completed := 0 - for completed < len(parts) { - select { - case part := <-results: - completed++ - dcp.PartStat[part.Index] = true - dcp.dump(cpFilePath) - case err := <-failed: - close(die) - return err - } - - if completed >= len(parts) { - break - } - } - - return dcp.complete(cpFilePath) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go deleted file mode 100755 index d1c4be2..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go +++ /dev/null @@ -1,82 +0,0 @@ -package oss - -import 
( - "encoding/xml" - "fmt" - "net/http" - "strings" -) - -// ServiceError contains fields of the error response from Oss Service REST API. -type ServiceError struct { - XMLName xml.Name `xml:"Error"` - Code string `xml:"Code"` // OSS返回给用户的错误码 - Message string `xml:"Message"` // OSS给出的详细错误信息 - RequestID string `xml:"RequestId"` // 用于唯一标识该次请求的UUID - HostID string `xml:"HostId"` // 用于标识访问的OSS集群 - RawMessage string // OSS返回的原始消息内容 - StatusCode int // HTTP状态码 -} - -// Implement interface error -func (e ServiceError) Error() string { - return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", - e.StatusCode, e.Code, e.Message, e.RequestID) -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int // 预期OSS返回HTTP状态码 - got int // OSS实际返回HTTP状态码 -} - -// Implement interface error -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("oss: status code from service response is %s; was expecting %s", - got, strings.Join(expected, " or ")) -} - -// Got is the actual status code returned by oss. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// checkRespCode returns UnexpectedStatusError if the given response code is not -// one of the allowed status codes; otherwise nil. -func checkRespCode(respCode int, allowed []int) error { - for _, v := range allowed { - if respCode == v { - return nil - } - } - return UnexpectedStatusCodeError{allowed, respCode} -} - -// CRCCheckError is returned when crc check is inconsistent between client and server -type CRCCheckError struct { - clientCRC uint64 // 客户端计算的CRC64值 - serverCRC uint64 // 服务端计算的CRC64值 - operation string // 上传操作,如PutObject/AppendObject/UploadPart等 - requestID string // 本次操作的RequestID -} - -// Implement interface error -func (e CRCCheckError) Error() string { - return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s", - e.operation, e.clientCRC, e.serverCRC, e.requestID) -} - -func checkCRC(resp *Response, operation string) error { - if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC { - return nil - } - return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)} -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go deleted file mode 100755 index 03032d2..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go +++ /dev/null @@ -1,245 +0,0 @@ -package oss - -import ( - "mime" - "path" - "strings" -) - -var extToMimeType = map[string]string{ - ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", - ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", - ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", - ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", - ".docx": 
"application/vnd.openxmlformats-officedocument.wordprocessingml.document", - ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", - ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", - ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", - ".apk": "application/vnd.android.package-archive", - ".hqx": "application/mac-binhex40", - ".cpt": "application/mac-compactpro", - ".doc": "application/msword", - ".ogg": "application/ogg", - ".pdf": "application/pdf", - ".rtf": "text/rtf", - ".mif": "application/vnd.mif", - ".xls": "application/vnd.ms-excel", - ".ppt": "application/vnd.ms-powerpoint", - ".odc": "application/vnd.oasis.opendocument.chart", - ".odb": "application/vnd.oasis.opendocument.database", - ".odf": "application/vnd.oasis.opendocument.formula", - ".odg": "application/vnd.oasis.opendocument.graphics", - ".otg": "application/vnd.oasis.opendocument.graphics-template", - ".odi": "application/vnd.oasis.opendocument.image", - ".odp": "application/vnd.oasis.opendocument.presentation", - ".otp": "application/vnd.oasis.opendocument.presentation-template", - ".ods": "application/vnd.oasis.opendocument.spreadsheet", - ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", - ".odt": "application/vnd.oasis.opendocument.text", - ".odm": "application/vnd.oasis.opendocument.text-master", - ".ott": "application/vnd.oasis.opendocument.text-template", - ".oth": "application/vnd.oasis.opendocument.text-web", - ".sxw": "application/vnd.sun.xml.writer", - ".stw": "application/vnd.sun.xml.writer.template", - ".sxc": "application/vnd.sun.xml.calc", - ".stc": "application/vnd.sun.xml.calc.template", - ".sxd": "application/vnd.sun.xml.draw", - ".std": "application/vnd.sun.xml.draw.template", - ".sxi": "application/vnd.sun.xml.impress", - ".sti": "application/vnd.sun.xml.impress.template", - ".sxg": "application/vnd.sun.xml.writer.global", - ".sxm": "application/vnd.sun.xml.math", - ".sis": "application/vnd.symbian.install", - ".wbxml": "application/vnd.wap.wbxml", - ".wmlc": "application/vnd.wap.wmlc", - ".wmlsc": "application/vnd.wap.wmlscriptc", - ".bcpio": "application/x-bcpio", - ".torrent": "application/x-bittorrent", - ".bz2": "application/x-bzip2", - ".vcd": "application/x-cdlink", - ".pgn": "application/x-chess-pgn", - ".cpio": "application/x-cpio", - ".csh": "application/x-csh", - ".dvi": "application/x-dvi", - ".spl": "application/x-futuresplash", - ".gtar": "application/x-gtar", - ".hdf": "application/x-hdf", - ".jar": "application/x-java-archive", - ".jnlp": "application/x-java-jnlp-file", - ".js": "application/x-javascript", - ".ksp": "application/x-kspread", - ".chrt": "application/x-kchart", - ".kil": "application/x-killustrator", - ".latex": "application/x-latex", - ".rpm": "application/x-rpm", - ".sh": "application/x-sh", - ".shar": "application/x-shar", - ".swf": "application/x-shockwave-flash", - ".sit": "application/x-stuffit", - ".sv4cpio": "application/x-sv4cpio", - ".sv4crc": "application/x-sv4crc", - ".tar": "application/x-tar", - ".tcl": "application/x-tcl", - ".tex": "application/x-tex", - ".man": "application/x-troff-man", - ".me": "application/x-troff-me", - ".ms": "application/x-troff-ms", - ".ustar": "application/x-ustar", - ".src": "application/x-wais-source", - ".zip": "application/zip", - ".m3u": "audio/x-mpegurl", - ".ra": "audio/x-pn-realaudio", - ".wav": "audio/x-wav", - ".wma": "audio/x-ms-wma", - ".wax": "audio/x-ms-wax", - ".pdb": "chemical/x-pdb", - ".xyz": "chemical/x-xyz", - ".bmp": "image/bmp", - 
".gif": "image/gif", - ".ief": "image/ief", - ".png": "image/png", - ".wbmp": "image/vnd.wap.wbmp", - ".ras": "image/x-cmu-raster", - ".pnm": "image/x-portable-anymap", - ".pbm": "image/x-portable-bitmap", - ".pgm": "image/x-portable-graymap", - ".ppm": "image/x-portable-pixmap", - ".rgb": "image/x-rgb", - ".xbm": "image/x-xbitmap", - ".xpm": "image/x-xpixmap", - ".xwd": "image/x-xwindowdump", - ".css": "text/css", - ".rtx": "text/richtext", - ".tsv": "text/tab-separated-values", - ".jad": "text/vnd.sun.j2me.app-descriptor", - ".wml": "text/vnd.wap.wml", - ".wmls": "text/vnd.wap.wmlscript", - ".etx": "text/x-setext", - ".mxu": "video/vnd.mpegurl", - ".flv": "video/x-flv", - ".wm": "video/x-ms-wm", - ".wmv": "video/x-ms-wmv", - ".wmx": "video/x-ms-wmx", - ".wvx": "video/x-ms-wvx", - ".avi": "video/x-msvideo", - ".movie": "video/x-sgi-movie", - ".ice": "x-conference/x-cooltalk", - ".3gp": "video/3gpp", - ".ai": "application/postscript", - ".aif": "audio/x-aiff", - ".aifc": "audio/x-aiff", - ".aiff": "audio/x-aiff", - ".asc": "text/plain", - ".atom": "application/atom+xml", - ".au": "audio/basic", - ".bin": "application/octet-stream", - ".cdf": "application/x-netcdf", - ".cgm": "image/cgm", - ".class": "application/octet-stream", - ".dcr": "application/x-director", - ".dif": "video/x-dv", - ".dir": "application/x-director", - ".djv": "image/vnd.djvu", - ".djvu": "image/vnd.djvu", - ".dll": "application/octet-stream", - ".dmg": "application/octet-stream", - ".dms": "application/octet-stream", - ".dtd": "application/xml-dtd", - ".dv": "video/x-dv", - ".dxr": "application/x-director", - ".eps": "application/postscript", - ".exe": "application/octet-stream", - ".ez": "application/andrew-inset", - ".gram": "application/srgs", - ".grxml": "application/srgs+xml", - ".gz": "application/x-gzip", - ".htm": "text/html", - ".html": "text/html", - ".ico": "image/x-icon", - ".ics": "text/calendar", - ".ifb": "text/calendar", - ".iges": "model/iges", - ".igs": "model/iges", - ".jp2": "image/jp2", - ".jpe": "image/jpeg", - ".jpeg": "image/jpeg", - ".jpg": "image/jpeg", - ".kar": "audio/midi", - ".lha": "application/octet-stream", - ".lzh": "application/octet-stream", - ".m4a": "audio/mp4a-latm", - ".m4p": "audio/mp4a-latm", - ".m4u": "video/vnd.mpegurl", - ".m4v": "video/x-m4v", - ".mac": "image/x-macpaint", - ".mathml": "application/mathml+xml", - ".mesh": "model/mesh", - ".mid": "audio/midi", - ".midi": "audio/midi", - ".mov": "video/quicktime", - ".mp2": "audio/mpeg", - ".mp3": "audio/mpeg", - ".mp4": "video/mp4", - ".mpe": "video/mpeg", - ".mpeg": "video/mpeg", - ".mpg": "video/mpeg", - ".mpga": "audio/mpeg", - ".msh": "model/mesh", - ".nc": "application/x-netcdf", - ".oda": "application/oda", - ".ogv": "video/ogv", - ".pct": "image/pict", - ".pic": "image/pict", - ".pict": "image/pict", - ".pnt": "image/x-macpaint", - ".pntg": "image/x-macpaint", - ".ps": "application/postscript", - ".qt": "video/quicktime", - ".qti": "image/x-quicktime", - ".qtif": "image/x-quicktime", - ".ram": "audio/x-pn-realaudio", - ".rdf": "application/rdf+xml", - ".rm": "application/vnd.rn-realmedia", - ".roff": "application/x-troff", - ".sgm": "text/sgml", - ".sgml": "text/sgml", - ".silo": "model/mesh", - ".skd": "application/x-koan", - ".skm": "application/x-koan", - ".skp": "application/x-koan", - ".skt": "application/x-koan", - ".smi": "application/smil", - ".smil": "application/smil", - ".snd": "audio/basic", - ".so": "application/octet-stream", - ".svg": "image/svg+xml", - ".t": "application/x-troff", - ".texi": 
"application/x-texinfo", - ".texinfo": "application/x-texinfo", - ".tif": "image/tiff", - ".tiff": "image/tiff", - ".tr": "application/x-troff", - ".txt": "text/plain", - ".vrml": "model/vrml", - ".vxml": "application/voicexml+xml", - ".webm": "video/webm", - ".wrl": "model/vrml", - ".xht": "application/xhtml+xml", - ".xhtml": "application/xhtml+xml", - ".xml": "application/xml", - ".xsl": "application/xml", - ".xslt": "application/xslt+xml", - ".xul": "application/vnd.mozilla.xul+xml", -} - -// TypeByExtension returns the MIME type associated with the file extension ext. -// 获取文件类型,选项ContentType使用 -func TypeByExtension(filePath string) string { - typ := mime.TypeByExtension(path.Ext(filePath)) - if typ == "" { - typ = extToMimeType[strings.ToLower(path.Ext(filePath))] - } - return typ -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go deleted file mode 100755 index 6c54587..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go +++ /dev/null @@ -1,60 +0,0 @@ -package oss - -import ( - "hash" - "io" - "net/http" -) - -// Response Http response from oss -type Response struct { - StatusCode int - Headers http.Header - Body io.ReadCloser - ClientCRC uint64 - ServerCRC uint64 -} - -// PutObjectRequest The request of DoPutObject -type PutObjectRequest struct { - ObjectKey string - Reader io.Reader -} - -// GetObjectRequest The request of DoGetObject -type GetObjectRequest struct { - ObjectKey string -} - -// GetObjectResult The result of DoGetObject -type GetObjectResult struct { - Response *Response - ClientCRC hash.Hash64 - ServerCRC uint64 -} - -// AppendObjectRequest The requtest of DoAppendObject -type AppendObjectRequest struct { - ObjectKey string - Reader io.Reader - Position int64 -} - -// AppendObjectResult The result of DoAppendObject -type AppendObjectResult struct { - NextPosition int64 - CRC uint64 -} - -// UploadPartRequest The request of DoUploadPart -type UploadPartRequest struct { - InitResult *InitiateMultipartUploadResult - Reader io.Reader - PartSize int64 - PartNumber int -} - -// UploadPartResult The result of DoUploadPart -type UploadPartResult struct { - Part UploadPart -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go deleted file mode 100755 index a543034..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go +++ /dev/null @@ -1,414 +0,0 @@ -package oss - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "os" - "path/filepath" - "strconv" -) - -// -// CopyFile 分片复制文件 -// -// srcBucketName 源Bucket名称。 -// srcObjectKey 源Object名称。 -// destObjectKey 目标Object名称。目标Bucket名称为Bucket.BucketName。 -// partSize 复制文件片的大小,字节数。比如100 * 1024为每片100KB。 -// options Object的属性限制项。详见InitiateMultipartUpload。 -// -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error { - destBucketName := bucket.BucketName - if partSize < MinPartSize || partSize > MaxPartSize { - return errors.New("oss: part size invalid range (1024KB, 5GB]") - } - - cpConf, err := getCpConfig(options, filepath.Base(destObjectKey)) - if err != nil { - return err - } - - routines := getRoutines(options) - - if cpConf.IsEnable { - return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, - partSize, options, cpConf.FilePath, routines) - } - - return 
bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey, - partSize, options, routines) -} - -// ----- 并发无断点的下载 ----- - -// 工作协程参数 -type copyWorkerArg struct { - bucket *Bucket - imur InitiateMultipartUploadResult - srcBucketName string - srcObjectKey string - options []Option - hook copyPartHook -} - -// Hook用于测试 -type copyPartHook func(part copyPart) error - -var copyPartHooker copyPartHook = defaultCopyPartHook - -func defaultCopyPartHook(part copyPart) error { - return nil -} - -// 工作协程 -func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <- chan bool) { - for chunk := range jobs { - if err := arg.hook(chunk); err != nil { - failed <- err - break - } - chunkSize := chunk.End - chunk.Start + 1 - part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, - chunk.Start, chunkSize, chunk.Number, arg.options...) - if err != nil { - failed <- err - break - } - select { - case <-die: - return - default: - } - results <- part - } -} - -// 调度协程 -func copyScheduler(jobs chan copyPart, parts []copyPart) { - for _, part := range parts { - jobs <- part - } - close(jobs) -} - -// 分片 -type copyPart struct { - Number int // 片序号[1, 10000] - Start int64 // 片起始位置 - End int64 // 片结束位置 -} - -// 文件分片 -func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) { - meta, err := bucket.GetObjectDetailedMeta(objectKey) - if err != nil { - return nil, err - } - - parts := []copyPart{} - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return nil, err - } - - part := copyPart{} - i := 0 - for offset := int64(0); offset < objectSize; offset += partSize { - part.Number = i + 1 - part.Start = offset - part.End = GetPartEnd(offset, objectSize, partSize) - parts = append(parts, part) - i++ - } - return parts, nil -} - -// 并发无断点续传的下载 -func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, - partSize int64, options []Option, routines int) error { - descBucket, err := bucket.Client.Bucket(destBucketName) - srcBucket, err := bucket.Client.Bucket(srcBucketName) - - // 分割文件 - parts, err := getCopyParts(srcBucket, srcObjectKey, partSize) - if err != nil { - return err - } - - // 初始化上传任务 - imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...) 
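// A minimal usage sketch of the CopyFile API documented above: copy an object
// between buckets in parts, with concurrency and a resume checkpoint. The
// function name, bucket and object names, part size and checkpoint path are
// placeholders, and destBucket is assumed to come from Client.Bucket as used
// elsewhere in this file.
package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

func copyLargeObject(destBucket *oss.Bucket) error {
	// src-bucket/src.zip -> <destBucket>/dst.zip, 1MB parts, three copy workers,
	// resumable through the checkpoint file dst.zip.cp.
	return destBucket.CopyFile("src-bucket", "src.zip", "dst.zip", 1024*1024,
		oss.Routines(3), oss.Checkpoint(true, "dst.zip.cp"))
}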
- if err != nil { - return err - } - - jobs := make(chan copyPart, len(parts)) - results := make(chan UploadPart, len(parts)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} - for w := 1; w <= routines; w++ { - go copyWorker(w, arg, jobs, results, failed, die) - } - - // 并发上传分片 - go copyScheduler(jobs, parts) - - // 等待分片下载完成 - completed := 0 - ups := make([]UploadPart, len(parts)) - for completed < len(parts) { - select { - case part := <-results: - completed++ - ups[part.PartNumber-1] = part - case err := <-failed: - close(die) - descBucket.AbortMultipartUpload(imur) - return err - } - - if completed >= len(parts) { - break - } - } - - // 提交任务 - _, err = descBucket.CompleteMultipartUpload(imur, ups) - if err != nil { - bucket.AbortMultipartUpload(imur) - return err - } - return nil -} - -// ----- 并发有断点的下载 ----- - -const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A" - -type copyCheckpoint struct { - Magic string // magic - MD5 string // cp内容的MD5 - SrcBucketName string // 源Bucket - SrcObjectKey string // 源Object - DestBucketName string // 目标Bucket - DestObjectKey string // 目标Bucket - CopyID string // copy id - ObjStat objectStat // 文件状态 - Parts []copyPart // 全部分片 - CopyParts []UploadPart // 分片上传成功后的返回值 - PartStat []bool // 分片下载是否完成 -} - -// CP数据是否有效,CP有效且Object没有更新时有效 -func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) { - // 比较CP的Magic及MD5 - cpb := cp - cpb.MD5 = "" - js, _ := json.Marshal(cpb) - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - - if cp.Magic != downloadCpMagic || b64 != cp.MD5 { - return false, nil - } - - // 确认object没有更新 - meta, err := bucket.GetObjectDetailedMeta(objectKey) - if err != nil { - return false, err - } - - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return false, err - } - - // 比较Object的大小/最后修改时间/etag - if cp.ObjStat.Size != objectSize || - cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || - cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { - return false, nil - } - - return true, nil -} - -// 从文件中load -func (cp *copyCheckpoint) load(filePath string) error { - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - err = json.Unmarshal(contents, cp) - return err -} - -// 更新分片状态 -func (cp *copyCheckpoint) update(part UploadPart) { - cp.CopyParts[part.PartNumber - 1] = part - cp.PartStat[part.PartNumber - 1] = true -} - -// dump到文件 -func (cp *copyCheckpoint) dump(filePath string) error { - bcp := *cp - - // 计算MD5 - bcp.MD5 = "" - js, err := json.Marshal(bcp) - if err != nil { - return err - } - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - bcp.MD5 = b64 - - // 序列化 - js, err = json.Marshal(bcp) - if err != nil { - return err - } - - // dump - return ioutil.WriteFile(filePath, js, 0644) -} - -// 未完成的分片 -func (cp copyCheckpoint) todoParts() []copyPart { - dps := []copyPart{} - for i, ps := range cp.PartStat { - if !ps { - dps = append(dps, cp.Parts[i]) - } - } - return dps -} - -// 初始化下载任务 -func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string, - partSize int64, options []Option) error { - // cp - cp.Magic = copyCpMagic - cp.SrcBucketName = srcBucket.BucketName - cp.SrcObjectKey = srcObjectKey - cp.DestBucketName = destBucket.BucketName - cp.DestObjectKey = destObjectKey - - // object - meta, err := 
srcBucket.GetObjectDetailedMeta(srcObjectKey) - if err != nil { - return err - } - - objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) - if err != nil { - return err - } - - cp.ObjStat.Size = objectSize - cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) - cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) - - // parts - cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize) - if err != nil { - return err - } - cp.PartStat = make([]bool, len(cp.Parts)) - for i := range cp.PartStat { - cp.PartStat[i] = false - } - cp.CopyParts = make([]UploadPart, len(cp.Parts)) - - // init copy - imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...) - if err != nil { - return err - } - cp.CopyID = imur.UploadID - - return nil -} - -func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error { - imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName, - Key: cp.DestObjectKey, UploadID: cp.CopyID} - _, err := bucket.CompleteMultipartUpload(imur, parts) - if err != nil { - return err - } - os.Remove(cpFilePath) - return err -} - -// 并发带断点的下载 -func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, - partSize int64, options []Option, cpFilePath string, routines int) error { - descBucket, err := bucket.Client.Bucket(destBucketName) - srcBucket, err := bucket.Client.Bucket(srcBucketName) - - // LOAD CP数据 - ccp := copyCheckpoint{} - err = ccp.load(cpFilePath) - if err != nil { - os.Remove(cpFilePath) - } - - // LOAD出错或数据无效重新初始化下载 - valid, err := ccp.isValid(srcBucket, srcObjectKey) - if err != nil || !valid { - if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil { - return err - } - os.Remove(cpFilePath) - } - - // 未完成的分片 - parts := ccp.todoParts() - imur := InitiateMultipartUploadResult{ - Bucket: destBucketName, - Key: destObjectKey, - UploadID: ccp.CopyID} - - jobs := make(chan copyPart, len(parts)) - results := make(chan UploadPart, len(parts)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} - for w := 1; w <= routines; w++ { - go copyWorker(w, arg, jobs, results, failed, die) - } - - // 并发下载分片 - go copyScheduler(jobs, parts) - - // 等待分片下载完成 - completed := 0 - for completed < len(parts) { - select { - case part := <-results: - completed++ - ccp.update(part); - ccp.dump(cpFilePath) - case err := <-failed: - close(die) - return err - } - - if completed >= len(parts) { - break - } - } - - return ccp.complete(descBucket, ccp.CopyParts, cpFilePath) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go deleted file mode 100755 index 00e0538..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go +++ /dev/null @@ -1,280 +0,0 @@ -package oss - -import ( - "bytes" - "encoding/xml" - "io" - "net/http" - "os" - "sort" - "strconv" -) - -// -// InitiateMultipartUpload 初始化分片上传任务。 -// -// objectKey Object名称。 -// options 上传时可以指定Object的属性,可选属性有CacheControl、ContentDisposition、ContentEncoding、Expires、 -// ServerSideEncryption、Meta,具体含义请参考 -// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html -// -// InitiateMultipartUploadResult 初始化后操作成功的返回值,用于后面的UploadPartFromFile、UploadPartCopy等操作。error为nil时有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) 
InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) { - var imur InitiateMultipartUploadResult - opts := addContentType(options, objectKey) - resp, err := bucket.do("POST", objectKey, "uploads", "uploads", opts, nil) - if err != nil { - return imur, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &imur) - return imur, err -} - -// -// UploadPart 上传分片。 -// -// 初始化一个Multipart Upload之后,可以根据指定的Object名和Upload ID来分片(Part)上传数据。 -// 每一个上传的Part都有一个标识它的号码(part number,范围是1~10000)。对于同一个Upload ID, -// 该号码不但唯一标识这一片数据,也标识了这片数据在整个文件内的相对位置。如果您用同一个part号码,上传了新的数据, -// 那么OSS上已有的这个号码的Part数据将被覆盖。除了最后一片Part以外,其他的part最小为100KB; -// 最后一片Part没有大小限制。 -// -// imur InitiateMultipartUpload成功后的返回值。 -// reader io.Reader 需要分片上传的reader。 -// size 本次上传片Part的大小。 -// partNumber 本次上传片(Part)的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。 -// -// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,即传入参数partNumber; -// ETag及上传数据的MD5。error为nil时有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader, - partSize int64, partNumber int) (UploadPart, error) { - request := &UploadPartRequest{ - InitResult: &imur, - Reader: reader, - PartSize: partSize, - PartNumber: partNumber, - } - - result, err := bucket.DoUploadPart(request) - - return result.Part, err -} - -// -// UploadPartFromFile 上传分片。 -// -// imur InitiateMultipartUpload成功后的返回值。 -// filePath 需要分片上传的本地文件。 -// startPosition 本次上传文件片的起始位置。 -// partSize 本次上传文件片的大小。 -// partNumber 本次上传文件片的编号,范围是1~10000。 -// -// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,传入参数partNumber; -// ETag上传数据的MD5。error为nil时有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string, - startPosition, partSize int64, partNumber int) (UploadPart, error) { - var part = UploadPart{} - fd, err := os.Open(filePath) - if err != nil { - return part, err - } - defer fd.Close() - fd.Seek(startPosition, os.SEEK_SET) - - request := &UploadPartRequest{ - InitResult: &imur, - Reader: fd, - PartSize: partSize, - PartNumber: partNumber, - } - - result, err := bucket.DoUploadPart(request) - - return result.Part, err -} - -// -// DoUploadPart 上传分片。 -// -// request 上传分片请求。 -// -// UploadPartResult 上传分片请求返回值。 -// error 操作无错误为nil,非nil为错误信息。 -// -func (bucket Bucket) DoUploadPart(request *UploadPartRequest) (*UploadPartResult, error) { - params := "partNumber=" + strconv.Itoa(request.PartNumber) + "&uploadId=" + request.InitResult.UploadID - opts := []Option{ContentLength(request.PartSize)} - resp, err := bucket.do("PUT", request.InitResult.Key, params, params, opts, - &io.LimitedReader{R: request.Reader, N: request.PartSize}) - if err != nil { - return &UploadPartResult{}, err - } - defer resp.Body.Close() - - part := UploadPart{ - ETag: resp.Headers.Get(HTTPHeaderEtag), - PartNumber: request.PartNumber, - } - - if bucket.getConfig().IsEnableCRC { - err = checkCRC(resp, "DoUploadPart") - if err != nil { - return &UploadPartResult{part}, err - } - } - - return &UploadPartResult{part}, nil -} - -// -// UploadPartCopy 拷贝分片。 -// -// imur InitiateMultipartUpload成功后的返回值。 -// copySrc 源Object名称。 -// startPosition 本次拷贝片(Part)在源Object的起始位置。 -// partSize 本次拷贝片的大小。 -// partNumber 本次拷贝片的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。 -// options copy时源Object的限制条件,满足限制条件时copy,不满足时返回错误。可选条件有CopySourceIfMatch、 -// CopySourceIfNoneMatch、CopySourceIfModifiedSince CopySourceIfUnmodifiedSince,具体含义请参看 -// 
https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html -// -// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片(Part)编号,即传入参数partNumber; -// ETag及上传数据的MD5。error为nil时有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, - startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { - var out UploadPartCopyResult - var part UploadPart - - opts := []Option{CopySource(srcBucketName, srcObjectKey), - CopySourceRange(startPosition, partSize)} - opts = append(opts, options...) - params := "partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + imur.UploadID - resp, err := bucket.do("PUT", imur.Key, params, params, opts, nil) - if err != nil { - return part, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return part, err - } - part.ETag = out.ETag - part.PartNumber = partNumber - - return part, nil -} - -// -// CompleteMultipartUpload 提交分片上传任务。 -// -// imur InitiateMultipartUpload的返回值。 -// parts UploadPart/UploadPartFromFile/UploadPartCopy返回值组成的数组。 -// -// CompleteMultipartUploadResponse 操作成功后的返回值。error为nil时有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, - parts []UploadPart) (CompleteMultipartUploadResult, error) { - var out CompleteMultipartUploadResult - - sort.Sort(uploadParts(parts)) - cxml := completeMultipartUploadXML{} - cxml.Part = parts - bs, err := xml.Marshal(cxml) - if err != nil { - return out, err - } - buffer := new(bytes.Buffer) - buffer.Write(bs) - - params := "uploadId=" + imur.UploadID - resp, err := bucket.do("POST", imur.Key, params, params, nil, buffer) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// AbortMultipartUpload 取消分片上传任务。 -// -// imur InitiateMultipartUpload的返回值。 -// -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error { - params := "uploadId=" + imur.UploadID - resp, err := bucket.do("DELETE", imur.Key, params, params, nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) -} - -// -// ListUploadedParts 列出指定上传任务已经上传的分片。 -// -// imur InitiateMultipartUpload的返回值。 -// -// ListUploadedPartsResponse 操作成功后的返回值,成员UploadedParts已经上传/拷贝的片。error为nil时该返回值有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) { - var out ListUploadedPartsResult - params := "uploadId=" + imur.UploadID - resp, err := bucket.do("GET", imur.Key, params, params, nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - return out, err -} - -// -// ListMultipartUploads 列出所有未上传完整的multipart任务列表。 -// -// options ListObject的筛选行为。Prefix返回object的前缀,KeyMarker返回object的起始位置,MaxUploads最大数目默认1000, -// Delimiter用于对Object名字进行分组的字符,所有名字包含指定的前缀且第一次出现delimiter字符之间的object。 -// -// ListMultipartUploadResponse 操作成功后的返回值,error为nil时该返回值有效。 -// error 操作成功error为nil,非nil为错误信息。 -// -func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { - var out ListMultipartUploadResult - - options = append(options, EncodingType("url")) - params, err := handleParams(options) - if err != nil { - return out, err - } 
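// A minimal sketch of the manual multipart sequence the methods above expose:
// InitiateMultipartUpload, UploadPartFromFile per chunk, then
// CompleteMultipartUpload, aborting on failure. The helper name and its
// parameters are placeholders; partSize should be at least MinPartSize (100KB)
// for every part except the last.
package example

import (
	"os"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func uploadInParts(bucket *oss.Bucket, objectKey, filePath string, partSize int64) error {
	fi, err := os.Stat(filePath)
	if err != nil {
		return err
	}
	imur, err := bucket.InitiateMultipartUpload(objectKey)
	if err != nil {
		return err
	}
	var parts []oss.UploadPart
	partNumber := 1 // part numbers are 1-based, range 1~10000
	for offset := int64(0); offset < fi.Size(); offset += partSize {
		size := partSize
		if remaining := fi.Size() - offset; remaining < size {
			size = remaining
		}
		part, err := bucket.UploadPartFromFile(imur, filePath, offset, size, partNumber)
		if err != nil {
			// Best-effort cleanup so the incomplete upload does not linger on OSS.
			bucket.AbortMultipartUpload(imur)
			return err
		}
		parts = append(parts, part)
		partNumber++
	}
	_, err = bucket.CompleteMultipartUpload(imur, parts)
	return err
}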
- - resp, err := bucket.do("GET", "", "uploads&"+params, "uploads", nil, nil) - if err != nil { - return out, err - } - defer resp.Body.Close() - - err = xmlUnmarshal(resp.Body, &out) - if err != nil { - return out, err - } - err = decodeListMultipartUploadResult(&out) - return out, err -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go deleted file mode 100755 index f37615d..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go +++ /dev/null @@ -1,346 +0,0 @@ -package oss - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "sort" - "strconv" - "time" -) - -type optionType string - -const ( - optionParam optionType = "HTTPParameter" // URL参数 - optionHTTP optionType = "HTTPHeader" // HTTP头 - optionArg optionType = "FuncArgument" // 函数参数 -) - -const ( - deleteObjectsQuiet = "delete-objects-quiet" - routineNum = "x-routine-num" - checkpointConfig = "x-cp-config" - initCRC64 = "init-crc64" -) - -type ( - optionValue struct { - Value string - Type optionType - } - - // Option http option - Option func(map[string]optionValue) error -) - -// ACL is an option to set X-Oss-Acl header -func ACL(acl ACLType) Option { - return setHeader(HTTPHeaderOssACL, string(acl)) -} - -// ContentType is an option to set Content-Type header -func ContentType(value string) Option { - return setHeader(HTTPHeaderContentType, value) -} - -// ContentLength is an option to set Content-Length header -func ContentLength(length int64) Option { - return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) -} - -// CacheControl is an option to set Cache-Control header -func CacheControl(value string) Option { - return setHeader(HTTPHeaderCacheControl, value) -} - -// ContentDisposition is an option to set Content-Disposition header -func ContentDisposition(value string) Option { - return setHeader(HTTPHeaderContentDisposition, value) -} - -// ContentEncoding is an option to set Content-Encoding header -func ContentEncoding(value string) Option { - return setHeader(HTTPHeaderContentEncoding, value) -} - -// ContentMD5 is an option to set Content-MD5 header -func ContentMD5(value string) Option { - return setHeader(HTTPHeaderContentMD5, value) -} - -// Expires is an option to set Expires header -func Expires(t time.Time) Option { - return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) -} - -// Meta is an option to set Meta header -func Meta(key, value string) Option { - return setHeader(HTTPHeaderOssMetaPrefix+key, value) -} - -// Range is an option to set Range header, [start, end] -func Range(start, end int64) Option { - return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) -} - -// AcceptEncoding is an option to set Accept-Encoding header -func AcceptEncoding(value string) Option { - return setHeader(HTTPHeaderAcceptEncoding, value) -} - -// IfModifiedSince is an option to set If-Modified-Since header -func IfModifiedSince(t time.Time) Option { - return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) -} - -// IfUnmodifiedSince is an option to set If-Unmodified-Since header -func IfUnmodifiedSince(t time.Time) Option { - return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) -} - -// IfMatch is an option to set If-Match header -func IfMatch(value string) Option { - return setHeader(HTTPHeaderIfMatch, value) -} - -// IfNoneMatch is an option to set IfNoneMatch header -func IfNoneMatch(value string) Option { - return 
setHeader(HTTPHeaderIfNoneMatch, value) -} - -// CopySource is an option to set X-Oss-Copy-Source header -func CopySource(sourceBucket, sourceObject string) Option { - return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject) -} - -// CopySourceRange is an option to set X-Oss-Copy-Source header -func CopySourceRange(startPosition, partSize int64) Option { - val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + - strconv.FormatInt((startPosition+partSize-1), 10) - return setHeader(HTTPHeaderOssCopySourceRange, val) -} - -// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header -func CopySourceIfMatch(value string) Option { - return setHeader(HTTPHeaderOssCopySourceIfMatch, value) -} - -// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header -func CopySourceIfNoneMatch(value string) Option { - return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) -} - -// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header -func CopySourceIfModifiedSince(t time.Time) Option { - return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) -} - -// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header -func CopySourceIfUnmodifiedSince(t time.Time) Option { - return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat)) -} - -// MetadataDirective is an option to set X-Oss-Metadata-Directive header -func MetadataDirective(directive MetadataDirectiveType) Option { - return setHeader(HTTPHeaderOssMetadataDirective, string(directive)) -} - -// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header -func ServerSideEncryption(value string) Option { - return setHeader(HTTPHeaderOssServerSideEncryption, value) -} - -// ObjectACL is an option to set X-Oss-Object-Acl header -func ObjectACL(acl ACLType) Option { - return setHeader(HTTPHeaderOssObjectACL, string(acl)) -} - -// Origin is an option to set Origin header -func Origin(value string) Option { - return setHeader(HTTPHeaderOrigin, value) -} - -// Delimiter is an option to set delimiler parameter -func Delimiter(value string) Option { - return addParam("delimiter", value) -} - -// Marker is an option to set marker parameter -func Marker(value string) Option { - return addParam("marker", value) -} - -// MaxKeys is an option to set maxkeys parameter -func MaxKeys(value int) Option { - return addParam("max-keys", strconv.Itoa(value)) -} - -// Prefix is an option to set prefix parameter -func Prefix(value string) Option { - return addParam("prefix", value) -} - -// EncodingType is an option to set encoding-type parameter -func EncodingType(value string) Option { - return addParam("encoding-type", value) -} - -// MaxUploads is an option to set max-uploads parameter -func MaxUploads(value int) Option { - return addParam("max-uploads", strconv.Itoa(value)) -} - -// KeyMarker is an option to set key-marker parameter -func KeyMarker(value string) Option { - return addParam("key-marker", value) -} - -// UploadIDMarker is an option to set upload-id-marker parameter -func UploadIDMarker(value string) Option { - return addParam("upload-id-marker", value) -} - -// DeleteObjectsQuiet DeleteObjects详细(verbose)模式或简单(quiet)模式,默认详细模式。 -func DeleteObjectsQuiet(isQuiet bool) Option { - return addArg(deleteObjectsQuiet, strconv.FormatBool(isQuiet)) -} - -type cpConfig struct { - IsEnable bool - FilePath string -} - -// Checkpoint 
DownloadFile/UploadFile是否开启checkpoint及checkpoint文件路径 -func Checkpoint(isEnable bool, filePath string) Option { - res, _ := json.Marshal(cpConfig{isEnable, filePath}) - return addArg(checkpointConfig, string(res)) -} - -// Routines DownloadFile/UploadFile并发数 -func Routines(n int) Option { - return addArg(routineNum, strconv.Itoa(n)) -} - -// InitCRC AppendObject CRC的校验的初始值 -func InitCRC(initCRC uint64) Option { - return addArg(initCRC64, strconv.FormatUint(initCRC, 10)) -} - -func setHeader(key, value string) Option { - return func(params map[string]optionValue) error { - if value == "" { - return nil - } - params[key] = optionValue{value, optionHTTP} - return nil - } -} - -func addParam(key, value string) Option { - return func(params map[string]optionValue) error { - if value == "" { - return nil - } - params[key] = optionValue{value, optionParam} - return nil - } -} - -func addArg(key, value string) Option { - return func(params map[string]optionValue) error { - if value == "" { - return nil - } - params[key] = optionValue{value, optionArg} - return nil - } -} - -func handleOptions(headers map[string]string, options []Option) error { - params := map[string]optionValue{} - for _, option := range options { - if option != nil { - if err := option(params); err != nil { - return err - } - } - } - - for k, v := range params { - if v.Type == optionHTTP { - headers[k] = v.Value - } - } - return nil -} - -func handleParams(options []Option) (string, error) { - // option - params := map[string]optionValue{} - for _, option := range options { - if option != nil { - if err := option(params); err != nil { - return "", err - } - } - } - - // sort - var buf bytes.Buffer - keys := make([]string, 0, len(params)) - for k, v := range params { - if v.Type == optionParam { - keys = append(keys, k) - } - } - sort.Strings(keys) - - // serialize - for _, k := range keys { - vs := params[k] - prefix := url.QueryEscape(k) + "=" - - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(url.QueryEscape(vs.Value)) - } - - return buf.String(), nil -} - -func findOption(options []Option, param, defaultVal string) (string, error) { - params := map[string]optionValue{} - for _, option := range options { - if option != nil { - if err := option(params); err != nil { - return "", err - } - } - } - - if val, ok := params[param]; ok { - return val.Value, nil - } - return defaultVal, nil -} - -func isOptionSet(options []Option, option string) (bool, string, error) { - params := map[string]optionValue{} - for _, option := range options { - if option != nil { - if err := option(params); err != nil { - return false, "", err - } - } - } - - if val, ok := params[option]; ok { - return true, val.Value, nil - } - return false, "", nil -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go deleted file mode 100755 index acfc595..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go +++ /dev/null @@ -1,442 +0,0 @@ -package oss - -import ( - "encoding/xml" - "net/url" - "time" -) - -// ListBucketsResult ListBuckets请求返回的结果 -type ListBucketsResult struct { - XMLName xml.Name `xml:"ListAllMyBucketsResult"` - Prefix string `xml:"Prefix"` // 本次查询结果的前缀 - Marker string `xml:"Marker"` // 标明查询的起点,未全部返回时有此节点 - MaxKeys int `xml:"MaxKeys"` // 返回结果的最大数目,未全部返回时有此节点 - IsTruncated bool `xml:"IsTruncated"` // 所有的结果是否已经全部返回 - NextMarker string `xml:"NextMarker"` // 表示下一次查询的起点 - Owner Owner `xml:"Owner"` // 拥有者信息 - Buckets 
[]BucketProperties `xml:"Buckets>Bucket"` // Bucket列表 -} - -// BucketProperties Bucket信息 -type BucketProperties struct { - XMLName xml.Name `xml:"Bucket"` - Name string `xml:"Name"` // Bucket名称 - Location string `xml:"Location"` // Bucket所在的数据中心 - CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间 -} - -// GetBucketACLResult GetBucketACL请求返回的结果 -type GetBucketACLResult struct { - XMLName xml.Name `xml:"AccessControlPolicy"` - ACL string `xml:"AccessControlList>Grant"` // Bucket权限 - Owner Owner `xml:"Owner"` // Bucket拥有者信息 -} - -// LifecycleConfiguration Bucket的Lifecycle配置 -type LifecycleConfiguration struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules []LifecycleRule `xml:"Rule"` -} - -// LifecycleRule Lifecycle规则 -type LifecycleRule struct { - XMLName xml.Name `xml:"Rule"` - ID string `xml:"ID"` // 规则唯一的ID - Prefix string `xml:"Prefix"` // 规则所适用Object的前缀 - Status string `xml:"Status"` // 规则是否生效 - Expiration LifecycleExpiration `xml:"Expiration"` // 规则的过期属性 -} - -// LifecycleExpiration 规则的过期属性 -type LifecycleExpiration struct { - XMLName xml.Name `xml:"Expiration"` - Days int `xml:"Days,omitempty"` // 最后修改时间过后多少天生效 - Date time.Time `xml:"Date,omitempty"` // 指定规则何时生效 -} - -type lifecycleXML struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules []lifecycleRule `xml:"Rule"` -} - -type lifecycleRule struct { - XMLName xml.Name `xml:"Rule"` - ID string `xml:"ID"` - Prefix string `xml:"Prefix"` - Status string `xml:"Status"` - Expiration lifecycleExpiration `xml:"Expiration"` -} - -type lifecycleExpiration struct { - XMLName xml.Name `xml:"Expiration"` - Days int `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` -} - -const expirationDateFormat = "2006-01-02T15:04:05.000Z" - -func convLifecycleRule(rules []LifecycleRule) []lifecycleRule { - rs := []lifecycleRule{} - for _, rule := range rules { - r := lifecycleRule{} - r.ID = rule.ID - r.Prefix = rule.Prefix - r.Status = rule.Status - if rule.Expiration.Date.IsZero() { - r.Expiration.Days = rule.Expiration.Days - } else { - r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat) - } - rs = append(rs, r) - } - return rs -} - -// BuildLifecycleRuleByDays 指定过期天数构建Lifecycle规则 -func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule { - var statusStr = "Enabled" - if !status { - statusStr = "Disabled" - } - return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, - Expiration: LifecycleExpiration{Days: days}} -} - -// BuildLifecycleRuleByDate 指定过期时间构建Lifecycle规则 -func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule { - var statusStr = "Enabled" - if !status { - statusStr = "Disabled" - } - date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) - return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, - Expiration: LifecycleExpiration{Date: date}} -} - -// GetBucketLifecycleResult GetBucketLifecycle请求请求结果 -type GetBucketLifecycleResult LifecycleConfiguration - -// RefererXML Referer配置 -type RefererXML struct { - XMLName xml.Name `xml:"RefererConfiguration"` - AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // 是否允许referer字段为空的请求访问 - RefererList []string `xml:"RefererList>Referer"` // referer访问白名单 -} - -// GetBucketRefererResult GetBucketReferer请教返回结果 -type GetBucketRefererResult RefererXML - -// LoggingXML Logging配置 -type LoggingXML struct { - XMLName xml.Name `xml:"BucketLoggingStatus"` - LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // 访问日志信息容器 -} - -type loggingXMLEmpty struct { - 
XMLName xml.Name `xml:"BucketLoggingStatus"` -} - -// LoggingEnabled 访问日志信息容器 -type LoggingEnabled struct { - XMLName xml.Name `xml:"LoggingEnabled"` - TargetBucket string `xml:"TargetBucket"` //存放访问日志的Bucket - TargetPrefix string `xml:"TargetPrefix"` //保存访问日志的文件前缀 -} - -// GetBucketLoggingResult GetBucketLogging请求返回结果 -type GetBucketLoggingResult LoggingXML - -// WebsiteXML Website配置 -type WebsiteXML struct { - XMLName xml.Name `xml:"WebsiteConfiguration"` - IndexDocument IndexDocument `xml:"IndexDocument"` // 目录URL时添加的索引文件 - ErrorDocument ErrorDocument `xml:"ErrorDocument"` // 404错误时使用的文件 -} - -// IndexDocument 目录URL时添加的索引文件 -type IndexDocument struct { - XMLName xml.Name `xml:"IndexDocument"` - Suffix string `xml:"Suffix"` // 目录URL时添加的索引文件名 -} - -// ErrorDocument 404错误时使用的文件 -type ErrorDocument struct { - XMLName xml.Name `xml:"ErrorDocument"` - Key string `xml:"Key"` // 404错误时使用的文件名 -} - -// GetBucketWebsiteResult GetBucketWebsite请求返回结果 -type GetBucketWebsiteResult WebsiteXML - -// CORSXML CORS配置 -type CORSXML struct { - XMLName xml.Name `xml:"CORSConfiguration"` - CORSRules []CORSRule `xml:"CORSRule"` // CORS规则列表 -} - -// CORSRule CORS规则 -type CORSRule struct { - XMLName xml.Name `xml:"CORSRule"` - AllowedOrigin []string `xml:"AllowedOrigin"` // 允许的来源,默认通配符"*" - AllowedMethod []string `xml:"AllowedMethod"` // 允许的方法 - AllowedHeader []string `xml:"AllowedHeader"` // 允许的请求头 - ExposeHeader []string `xml:"ExposeHeader"` // 允许的响应头 - MaxAgeSeconds int `xml:"MaxAgeSeconds"` // 最大的缓存时间 -} - -// GetBucketCORSResult GetBucketCORS请求返回的结果 -type GetBucketCORSResult CORSXML - -// GetBucketInfoResult GetBucketInfo请求返回结果 -type GetBucketInfoResult struct { - XMLName xml.Name `xml:"BucketInfo"` - BucketInfo BucketInfo `xml:"Bucket"` -} - -// BucketInfo Bucket信息 -type BucketInfo struct { - XMLName xml.Name `xml:"Bucket"` - Name string `xml:"Name"` // Bucket名称 - Location string `xml:"Location"` // Bucket所在的数据中心 - CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间 - ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket访问的外网域名 - IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket访问的内网域名 - ACL string `xml:"AccessControlList>Grant"` // Bucket权限 - Owner Owner `xml:"Owner"` // Bucket拥有者信息 -} - -// ListObjectsResult ListObjects请求返回结果 -type ListObjectsResult struct { - XMLName xml.Name `xml:"ListBucketResult"` - Prefix string `xml:"Prefix"` // 本次查询结果的开始前缀 - Marker string `xml:"Marker"` // 这次查询的起点 - MaxKeys int `xml:"MaxKeys"` // 请求返回结果的最大数目 - Delimiter string `xml:"Delimiter"` // 对Object名字进行分组的字符 - IsTruncated bool `xml:"IsTruncated"` // 是否所有的结果都已经返回 - NextMarker string `xml:"NextMarker"` // 下一次查询的起点 - Objects []ObjectProperties `xml:"Contents"` // Object类别 - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 以delimiter结尾并有共同前缀的Object的集合 -} - -// ObjectProperties Objecct属性 -type ObjectProperties struct { - XMLName xml.Name `xml:"Contents"` - Key string `xml:"Key"` // Object的Key - Type string `xml:"Type"` // Object Type - Size int64 `xml:"Size"` // Object的长度字节数 - ETag string `xml:"ETag"` // 标示Object的内容 - Owner Owner `xml:"Owner"` // 保存Object拥有者信息的容器 - LastModified time.Time `xml:"LastModified"` // Object最后修改时间 - StorageClass string `xml:"StorageClass"` // Object的存储类型,目前只能是Standard -} - -// Owner Bucket/Object的owner -type Owner struct { - XMLName xml.Name `xml:"Owner"` - ID string `xml:"ID"` // 用户ID - DisplayName string `xml:"DisplayName"` // Owner名字 -} - -// CopyObjectResult CopyObject请求返回的结果 -type CopyObjectResult struct { - XMLName xml.Name `xml:"CopyObjectResult"` - LastModified 
time.Time `xml:"LastModified"` // 新Object最后更新时间 - ETag string `xml:"ETag"` // 新Object的ETag值 -} - -// GetObjectACLResult GetObjectACL请求返回的结果 -type GetObjectACLResult GetBucketACLResult - -type deleteXML struct { - XMLName xml.Name `xml:"Delete"` - Objects []DeleteObject `xml:"Object"` // 删除的所有Object - Quiet bool `xml:"Quiet"` // 安静响应模式 -} - -// DeleteObject 删除的Object -type DeleteObject struct { - XMLName xml.Name `xml:"Object"` - Key string `xml:"Key"` // Object名称 -} - -// DeleteObjectsResult DeleteObjects请求返回结果 -type DeleteObjectsResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []string `xml:"Deleted>Key"` // 删除的Object列表 -} - -// InitiateMultipartUploadResult InitiateMultipartUpload请求返回结果 -type InitiateMultipartUploadResult struct { - XMLName xml.Name `xml:"InitiateMultipartUploadResult"` - Bucket string `xml:"Bucket"` // Bucket名称 - Key string `xml:"Key"` // 上传Object名称 - UploadID string `xml:"UploadId"` // 生成的UploadId -} - -// UploadPart 上传/拷贝的分片 -type UploadPart struct { - XMLName xml.Name `xml:"Part"` - PartNumber int `xml:"PartNumber"` // Part编号 - ETag string `xml:"ETag"` // ETag缓存码 -} - -type uploadParts []UploadPart - -func (slice uploadParts) Len() int { - return len(slice) -} - -func (slice uploadParts) Less(i, j int) bool { - return slice[i].PartNumber < slice[j].PartNumber -} - -func (slice uploadParts) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -// UploadPartCopyResult 拷贝分片请求返回的结果 -type UploadPartCopyResult struct { - XMLName xml.Name `xml:"CopyPartResult"` - LastModified time.Time `xml:"LastModified"` // 最后修改时间 - ETag string `xml:"ETag"` // ETag -} - -type completeMultipartUploadXML struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Part []UploadPart `xml:"Part"` -} - -// CompleteMultipartUploadResult 提交分片上传任务返回结果 -type CompleteMultipartUploadResult struct { - XMLName xml.Name `xml:"CompleteMultipartUploadResult"` - Location string `xml:"Location"` // Object的URL - Bucket string `xml:"Bucket"` // Bucket名称 - ETag string `xml:"ETag"` // Object的ETag - Key string `xml:"Key"` // Object的名字 -} - -// ListUploadedPartsResult ListUploadedParts请求返回结果 -type ListUploadedPartsResult struct { - XMLName xml.Name `xml:"ListPartsResult"` - Bucket string `xml:"Bucket"` // Bucket名称 - Key string `xml:"Key"` // Object名称 - UploadID string `xml:"UploadId"` // 上传Id - NextPartNumberMarker string `xml:"NextPartNumberMarker"` // 下一个Part的位置 - MaxParts int `xml:"MaxParts"` // 最大Part个数 - IsTruncated bool `xml:"IsTruncated"` // 是否完全上传完成 - UploadedParts []UploadedPart `xml:"Part"` // 已完成的Part -} - -// UploadedPart 该任务已经上传的分片 -type UploadedPart struct { - XMLName xml.Name `xml:"Part"` - PartNumber int `xml:"PartNumber"` // Part编号 - LastModified time.Time `xml:"LastModified"` // 最后一次修改时间 - ETag string `xml:"ETag"` // ETag缓存码 - Size int `xml:"Size"` // Part大小 -} - -// ListMultipartUploadResult ListMultipartUpload请求返回结果 -type ListMultipartUploadResult struct { - XMLName xml.Name `xml:"ListMultipartUploadsResult"` - Bucket string `xml:"Bucket"` // Bucket名称 - Delimiter string `xml:"Delimiter"` // 分组分割符 - Prefix string `xml:"Prefix"` // 筛选前缀 - KeyMarker string `xml:"KeyMarker"` // 起始Object位置 - UploadIDMarker string `xml:"UploadIdMarker"` // 起始UploadId位置 - NextKeyMarker string `xml:"NextKeyMarker"` // 如果没有全部返回,标明接下去的KeyMarker位置 - NextUploadIDMarker string `xml:"NextUploadIdMarker"` // 如果没有全部返回,标明接下去的UploadId位置 - MaxUploads int `xml:"MaxUploads"` // 返回最大Upload数目 - IsTruncated bool `xml:"IsTruncated"` // 是否完全返回 - Uploads []UncompletedUpload `xml:"Upload"` // 
未完成上传的MultipartUpload - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // 所有名字包含指定的前缀且第一次出现delimiter字符之间的object作为一组的分组结果 -} - -// UncompletedUpload 未完成的Upload任务 -type UncompletedUpload struct { - XMLName xml.Name `xml:"Upload"` - Key string `xml:"Key"` // Object名称 - UploadID string `xml:"UploadId"` // 对应UploadId - Initiated time.Time `xml:"Initiated"` // 初始化时间,格式2012-02-23T04:18:23.000Z -} - -// 解析URL编码 -func decodeDeleteObjectsResult(result *DeleteObjectsResult) error { - var err error - for i := 0; i < len(result.DeletedObjects); i++ { - result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i]) - if err != nil { - return err - } - } - return nil -} - -// 解析URL编码 -func decodeListObjectsResult(result *ListObjectsResult) error { - var err error - result.Prefix, err = url.QueryUnescape(result.Prefix) - if err != nil { - return err - } - result.Marker, err = url.QueryUnescape(result.Marker) - if err != nil { - return err - } - result.Delimiter, err = url.QueryUnescape(result.Delimiter) - if err != nil { - return err - } - result.NextMarker, err = url.QueryUnescape(result.NextMarker) - if err != nil { - return err - } - for i := 0; i < len(result.Objects); i++ { - result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) - if err != nil { - return err - } - } - for i := 0; i < len(result.CommonPrefixes); i++ { - result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) - if err != nil { - return err - } - } - return nil -} - -// 解析URL编码 -func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error { - var err error - result.Prefix, err = url.QueryUnescape(result.Prefix) - if err != nil { - return err - } - result.Delimiter, err = url.QueryUnescape(result.Delimiter) - if err != nil { - return err - } - result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) - if err != nil { - return err - } - result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) - if err != nil { - return err - } - for i := 0; i < len(result.Uploads); i++ { - result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key) - if err != nil { - return err - } - } - for i := 0; i < len(result.CommonPrefixes); i++ { - result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go deleted file mode 100755 index ada9c24..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go +++ /dev/null @@ -1,438 +0,0 @@ -package oss - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "os" - "strconv" - "time" -) - -// -// UploadFile 分片上传文件 -// -// objectKey object名称。 -// filePath 本地文件。需要上传的文件。 -// partSize 本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。 -// options 上传Object时可以指定Object的属性。详见InitiateMultipartUpload。 -// -// error 操作成功为nil,非nil为错误信息。 -// -func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error { - if partSize < MinPartSize || partSize > MaxPartSize { - return errors.New("oss: part size invalid range (1024KB, 5GB]") - } - - cpConf, err := getCpConfig(options, filePath) - if err != nil { - return err - } - - routines := getRoutines(options) - - if cpConf.IsEnable { - return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines) - } - - return bucket.uploadFile(objectKey, filePath, partSize, options, routines) -} - -// ----- 
并发无断点的上传 ----- - -// 获取Checkpoint配置 -func getCpConfig(options []Option, filePath string) (*cpConfig, error) { - cpc := cpConfig{} - cpStr, err := findOption(options, checkpointConfig, "") - if err != nil { - return nil, err - } - - if cpStr != "" { - if err = json.Unmarshal([]byte(cpStr), &cpc); err != nil { - return nil, err - } - } - - if cpc.IsEnable && cpc.FilePath == "" { - cpc.FilePath = filePath + ".cp" - } - - return &cpc, nil -} - -// 获取并发数,默认并发数1 -func getRoutines(options []Option) int { - rStr, err := findOption(options, routineNum, "") - if err != nil || rStr == "" { - return 1 - } - - rs, err := strconv.Atoi(rStr) - if err != nil { - return 1 - } - - if rs < 1 { - rs = 1 - } else if rs > 100 { - rs = 100 - } - - return rs -} - -// 测试使用 -type uploadPartHook func(id int, chunk FileChunk) error - -var uploadPartHooker uploadPartHook = defaultUploadPart - -func defaultUploadPart(id int, chunk FileChunk) error { - return nil -} - -// 工作协程参数 -type workerArg struct { - bucket *Bucket - filePath string - imur InitiateMultipartUploadResult - hook uploadPartHook -} - -// 工作协程 -func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <- chan bool) { - for chunk := range jobs { - if err := arg.hook(id, chunk); err != nil { - failed <- err - break - } - part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number) - if err != nil { - failed <- err - break - } - select { - case <-die: - return - default: - } - results <- part - } -} - -// 调度协程 -func scheduler(jobs chan FileChunk, chunks []FileChunk) { - for _, chunk := range chunks { - jobs <- chunk - } - close(jobs) -} - -// 并发上传,不带断点续传功能 -func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { - chunks, err := SplitFileByPartSize(filePath, partSize) - if err != nil { - return err - } - - // 初始化上传任务 - imur, err := bucket.InitiateMultipartUpload(objectKey, options...) 
- if err != nil { - return err - } - - jobs := make(chan FileChunk, len(chunks)) - results := make(chan UploadPart, len(chunks)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := workerArg{&bucket, filePath, imur, uploadPartHooker} - for w := 1; w <= routines; w++ { - go worker(w, arg, jobs, results, failed, die) - } - - // 并发上传分片 - go scheduler(jobs, chunks) - - // 等待分配分片上传完成 - completed := 0 - parts := make([]UploadPart, len(chunks)) - for completed < len(chunks) { - select { - case part := <-results: - completed++ - parts[part.PartNumber-1] = part - case err := <-failed: - close(die) - bucket.AbortMultipartUpload(imur) - return err - } - - if completed >= len(chunks) { - break - } - } - - // 提交任务 - _, err = bucket.CompleteMultipartUpload(imur, parts) - if err != nil { - bucket.AbortMultipartUpload(imur) - return err - } - return nil -} - -// ----- 并发带断点的上传 ----- -const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62" - -type uploadCheckpoint struct { - Magic string // magic - MD5 string // cp内容的MD5 - FilePath string // 本地文件 - FileStat cpStat // 文件状态 - ObjectKey string // key - UploadID string // upload id - Parts []cpPart // 本地文件的全部分片 -} - -type cpStat struct { - Size int64 // 文件大小 - LastModified time.Time // 本地文件最后修改时间 - MD5 string // 本地文件MD5 -} - -type cpPart struct { - Chunk FileChunk // 分片 - Part UploadPart // 上传完成的分片 - IsCompleted bool // upload是否完成 -} - -// CP数据是否有效,CP有效且文件没有更新时有效 -func (cp uploadCheckpoint) isValid(filePath string) (bool, error) { - // 比较CP的Magic及MD5 - cpb := cp - cpb.MD5 = "" - js, _ := json.Marshal(cpb) - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - - if cp.Magic != uploadCpMagic || b64 != cp.MD5 { - return false, nil - } - - // 确认本地文件是否更新 - fd, err := os.Open(filePath) - if err != nil { - return false, err - } - defer fd.Close() - - st, err := fd.Stat() - if err != nil { - return false, err - } - - md, err := calcFileMD5(filePath) - if err != nil { - return false, err - } - - // 比较文件大小/文件最后更新时间/文件MD5 - if cp.FileStat.Size != st.Size() || - cp.FileStat.LastModified != st.ModTime() || - cp.FileStat.MD5 != md { - return false, nil - } - - return true, nil -} - -// 从文件中load -func (cp *uploadCheckpoint) load(filePath string) error { - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - err = json.Unmarshal(contents, cp) - return err -} - -// dump到文件 -func (cp *uploadCheckpoint) dump(filePath string) error { - bcp := *cp - - // 计算MD5 - bcp.MD5 = "" - js, err := json.Marshal(bcp) - if err != nil { - return err - } - sum := md5.Sum(js) - b64 := base64.StdEncoding.EncodeToString(sum[:]) - bcp.MD5 = b64 - - // 序列化 - js, err = json.Marshal(bcp) - if err != nil { - return err - } - - // dump - return ioutil.WriteFile(filePath, js, 0644) -} - -// 更新分片状态 -func (cp *uploadCheckpoint) updatePart(part UploadPart) { - cp.Parts[part.PartNumber-1].Part = part - cp.Parts[part.PartNumber-1].IsCompleted = true -} - -// 未完成的分片 -func (cp *uploadCheckpoint) todoParts() []FileChunk { - fcs := []FileChunk{} - for _, part := range cp.Parts { - if !part.IsCompleted { - fcs = append(fcs, part.Chunk) - } - } - return fcs -} - -// 所有的分片 -func (cp *uploadCheckpoint) allParts() []UploadPart { - ps := []UploadPart{} - for _, part := range cp.Parts { - ps = append(ps, part.Part) - } - return ps -} - -// 计算文件文件MD5 -func calcFileMD5(filePath string) (string, error) { - return "", nil -} - -// 初始化分片上传 -func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options 
[]Option) error { - // cp - cp.Magic = uploadCpMagic - cp.FilePath = filePath - cp.ObjectKey = objectKey - - // localfile - fd, err := os.Open(filePath) - if err != nil { - return err - } - defer fd.Close() - - st, err := fd.Stat() - if err != nil { - return err - } - cp.FileStat.Size = st.Size() - cp.FileStat.LastModified = st.ModTime() - md, err := calcFileMD5(filePath) - if err != nil { - return err - } - cp.FileStat.MD5 = md - - // chunks - parts, err := SplitFileByPartSize(filePath, partSize) - if err != nil { - return err - } - - cp.Parts = make([]cpPart, len(parts)) - for i, part := range parts { - cp.Parts[i].Chunk = part - cp.Parts[i].IsCompleted = false - } - - // init load - imur, err := bucket.InitiateMultipartUpload(objectKey, options...) - if err != nil { - return err - } - cp.UploadID = imur.UploadID - - return nil -} - -// 提交分片上传,删除CP文件 -func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error { - imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName, - Key: cp.ObjectKey, UploadID: cp.UploadID} - _, err := bucket.CompleteMultipartUpload(imur, parts) - if err != nil { - return err - } - os.Remove(cpFilePath) - return err -} - -// 并发带断点的上传 -func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error { - // LOAD CP数据 - ucp := uploadCheckpoint{} - err := ucp.load(cpFilePath) - if err != nil { - os.Remove(cpFilePath) - } - - // LOAD出错或数据无效重新初始化上传 - valid, err := ucp.isValid(filePath) - if err != nil || !valid { - if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil { - return err - } - os.Remove(cpFilePath) - } - - chunks := ucp.todoParts() - imur := InitiateMultipartUploadResult{ - Bucket: bucket.BucketName, - Key: objectKey, - UploadID: ucp.UploadID} - - jobs := make(chan FileChunk, len(chunks)) - results := make(chan UploadPart, len(chunks)) - failed := make(chan error) - die := make(chan bool) - - // 启动工作协程 - arg := workerArg{&bucket, filePath, imur, uploadPartHooker} - for w := 1; w <= routines; w++ { - go worker(w, arg, jobs, results, failed, die) - } - - // 并发上传分片 - go scheduler(jobs, chunks) - - // 等待分配分片上传完成 - completed := 0 - for completed < len(chunks) { - select { - case part := <-results: - completed++ - ucp.updatePart(part) - ucp.dump(cpFilePath) - case err := <-failed: - close(die) - return err - } - - if completed >= len(chunks) { - break - } - } - - // 提交分片上传 - err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath) - return err -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go deleted file mode 100755 index 15eb3ed..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go +++ /dev/null @@ -1,165 +0,0 @@ -package oss - -import ( - "bytes" - "errors" - "fmt" - "hash/crc64" - "net/http" - "os" - "os/exec" - "runtime" - "time" -) - -// Get User Agent -// Go sdk相关信息,包括sdk版本,操作系统类型,GO版本 -var userAgent = func() string { - sys := getSysInfo() - return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name, - sys.release, sys.machine, runtime.Version()) -}() - -type sysInfo struct { - name string // 操作系统名称windows/Linux - release string // 操作系统版本 2.6.32-220.23.2.ali1089.el5.x86_64等 - machine string // 机器类型amd64/x86_64 -} - -// Get system info -// 获取操作系统信息、机器类型 -func getSysInfo() sysInfo { - name := runtime.GOOS - release := "-" - machine := runtime.GOARCH - if out, err := exec.Command("uname", 
"-s").CombinedOutput(); err == nil { - name = string(bytes.TrimSpace(out)) - } - if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil { - release = string(bytes.TrimSpace(out)) - } - if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil { - machine = string(bytes.TrimSpace(out)) - } - return sysInfo{name: name, release: release, machine: machine} -} - -// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. -// 获取当前时间,从UTC开始的秒数。 -func GetNowSec() int64 { - return time.Now().Unix() -} - -// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. The result is undefined if the Unix time -// in nanoseconds cannot be represented by an int64. Note that this -// means the result of calling UnixNano on the zero Time is undefined. -// 获取当前时间,从UTC开始的纳秒。 -func GetNowNanoSec() int64 { - return time.Now().UnixNano() -} - -// GetNowGMT 获取当前时间,格式形如"Mon, 02 Jan 2006 15:04:05 GMT",HTTP中使用的时间格式 -func GetNowGMT() string { - return time.Now().UTC().Format(http.TimeFormat) -} - -// FileChunk 文件片定义 -type FileChunk struct { - Number int // 块序号 - Offset int64 // 块在文件中的偏移量 - Size int64 // 块大小 -} - -// SplitFileByPartNum Split big file to part by the num of part -// 按指定的块数分割文件。返回值FileChunk为分割结果,error为nil时有效。 -func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) { - if chunkNum <= 0 || chunkNum > 10000 { - return nil, errors.New("chunkNum invalid") - } - - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return nil, err - } - - if int64(chunkNum) > stat.Size() { - return nil, errors.New("oss: chunkNum invalid") - } - - var chunks []FileChunk - var chunk = FileChunk{} - var chunkN = (int64)(chunkNum) - for i := int64(0); i < chunkN; i++ { - chunk.Number = int(i + 1) - chunk.Offset = i * (stat.Size() / chunkN) - if i == chunkN-1 { - chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN - } else { - chunk.Size = stat.Size() / chunkN - } - chunks = append(chunks, chunk) - } - - return chunks, nil -} - -// SplitFileByPartSize Split big file to part by the size of part -// 按块大小分割文件。返回值FileChunk为分割结果,error为nil时有效。 -func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) { - if chunkSize <= 0 { - return nil, errors.New("chunkSize invalid") - } - - file, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return nil, err - } - var chunkN = stat.Size() / chunkSize - if chunkN >= 10000 { - return nil, errors.New("Too many parts, please increase part size.") - } - - var chunks []FileChunk - var chunk = FileChunk{} - for i := int64(0); i < chunkN; i++ { - chunk.Number = int(i + 1) - chunk.Offset = i * chunkSize - chunk.Size = chunkSize - chunks = append(chunks, chunk) - } - - if stat.Size()%chunkSize > 0 { - chunk.Number = len(chunks) + 1 - chunk.Offset = int64(len(chunks)) * chunkSize - chunk.Size = stat.Size() % chunkSize - chunks = append(chunks, chunk) - } - - return chunks, nil -} - -// GetPartEnd 计算结束位置 -func GetPartEnd(begin int64, total int64, per int64) int64 { - if begin+per > total { - return total - 1 - } - return begin + per - 1 -} - -// crcTable returns the Table constructed from the specified polynomial -var crcTable = func() *crc64.Table { - return crc64.MakeTable(crc64.ECMA) -} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/sample.go 
b/vendor/github.com/aliyun/aliyun-oss-go-sdk/sample.go deleted file mode 100644 index 8fd8cc8..0000000 --- a/vendor/github.com/aliyun/aliyun-oss-go-sdk/sample.go +++ /dev/null @@ -1,36 +0,0 @@ -// main of samples - -package main - -import ( - "fmt" - - "github.com/aliyun/aliyun-oss-go-sdk/sample" -) - -func main() { - sample.CreateBucketSample() - sample.NewBucketSample() - sample.ListBucketsSample() - sample.BucketACLSample() - sample.BucketLifecycleSample() - sample.BucketRefererSample() - sample.BucketLoggingSample() - sample.BucketCORSSample() - - sample.ObjectACLSample() - sample.ObjectMetaSample() - sample.ListObjectsSample() - sample.DeleteObjectSample() - sample.AppendObjectSample() - sample.CopyObjectSample() - sample.PutObjectSample() - sample.GetObjectSample() - - sample.CnameSample() - sample.SignURLSample() - - sample.ArchiveSample() - - fmt.Println("All samples completed") -} diff --git a/vendor/github.com/andybalholm/cascadia/LICENSE b/vendor/github.com/andybalholm/cascadia/LICENSE deleted file mode 100755 index ee5ad35..0000000 --- a/vendor/github.com/andybalholm/cascadia/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2011 Andy Balholm. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/andybalholm/cascadia/README.md b/vendor/github.com/andybalholm/cascadia/README.md deleted file mode 100644 index 9021cb9..0000000 --- a/vendor/github.com/andybalholm/cascadia/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# cascadia - -[![](https://travis-ci.org/andybalholm/cascadia.svg)](https://travis-ci.org/andybalholm/cascadia) - -The Cascadia package implements CSS selectors for use with the parse trees produced by the html package. - -To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package. 
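The cascadia README deleted above describes the package but gives no usage sketch. As an illustrative aside only (not part of the original patch), here is a minimal, hypothetical example of the public API it refers to — `MustCompile` and `Selector.MatchAll`, as defined in the `selector.go` removed further down — assuming the parse tree comes from `golang.org/x/net/html`:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	// Parse a small HTML fragment into the node tree cascadia operates on.
	doc, err := html.Parse(strings.NewReader(
		`<ul><li><a href="/a">A</a></li><li><a href="/b">B</a></li></ul>`))
	if err != nil {
		log.Fatal(err)
	}

	// Compile the CSS selector once and reuse it.
	sel := cascadia.MustCompile("li > a[href]")

	// MatchAll returns every node in the tree that the selector matches.
	for _, n := range sel.MatchAll(doc) {
		for _, attr := range n.Attr {
			if attr.Key == "href" {
				fmt.Println(attr.Val) // prints /a then /b
			}
		}
	}
}
```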
diff --git a/vendor/github.com/andybalholm/cascadia/go.mod b/vendor/github.com/andybalholm/cascadia/go.mod deleted file mode 100644 index e6febbb..0000000 --- a/vendor/github.com/andybalholm/cascadia/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module "github.com/andybalholm/cascadia" - -require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01 diff --git a/vendor/github.com/andybalholm/cascadia/parser.go b/vendor/github.com/andybalholm/cascadia/parser.go deleted file mode 100644 index 495db9c..0000000 --- a/vendor/github.com/andybalholm/cascadia/parser.go +++ /dev/null @@ -1,835 +0,0 @@ -// Package cascadia is an implementation of CSS selectors. -package cascadia - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "golang.org/x/net/html" -) - -// a parser for CSS selectors -type parser struct { - s string // the source text - i int // the current position -} - -// parseEscape parses a backslash escape. -func (p *parser) parseEscape() (result string, err error) { - if len(p.s) < p.i+2 || p.s[p.i] != '\\' { - return "", errors.New("invalid escape sequence") - } - - start := p.i + 1 - c := p.s[start] - switch { - case c == '\r' || c == '\n' || c == '\f': - return "", errors.New("escaped line ending outside string") - case hexDigit(c): - // unicode escape (hex) - var i int - for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ { - // empty - } - v, _ := strconv.ParseUint(p.s[start:i], 16, 21) - if len(p.s) > i { - switch p.s[i] { - case '\r': - i++ - if len(p.s) > i && p.s[i] == '\n' { - i++ - } - case ' ', '\t', '\n', '\f': - i++ - } - } - p.i = i - return string(rune(v)), nil - } - - // Return the literal character after the backslash. - result = p.s[start : start+1] - p.i += 2 - return result, nil -} - -func hexDigit(c byte) bool { - return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' -} - -// nameStart returns whether c can be the first character of an identifier -// (not counting an initial hyphen, or an escape sequence). -func nameStart(c byte) bool { - return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 -} - -// nameChar returns whether c can be a character within an identifier -// (not counting an escape sequence). -func nameChar(c byte) bool { - return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 || - c == '-' || '0' <= c && c <= '9' -} - -// parseIdentifier parses an identifier. -func (p *parser) parseIdentifier() (result string, err error) { - startingDash := false - if len(p.s) > p.i && p.s[p.i] == '-' { - startingDash = true - p.i++ - } - - if len(p.s) <= p.i { - return "", errors.New("expected identifier, found EOF instead") - } - - if c := p.s[p.i]; !(nameStart(c) || c == '\\') { - return "", fmt.Errorf("expected identifier, found %c instead", c) - } - - result, err = p.parseName() - if startingDash && err == nil { - result = "-" + result - } - return -} - -// parseName parses a name (which is like an identifier, but doesn't have -// extra restrictions on the first character). 
-func (p *parser) parseName() (result string, err error) { - i := p.i -loop: - for i < len(p.s) { - c := p.s[i] - switch { - case nameChar(c): - start := i - for i < len(p.s) && nameChar(p.s[i]) { - i++ - } - result += p.s[start:i] - case c == '\\': - p.i = i - val, err := p.parseEscape() - if err != nil { - return "", err - } - i = p.i - result += val - default: - break loop - } - } - - if result == "" { - return "", errors.New("expected name, found EOF instead") - } - - p.i = i - return result, nil -} - -// parseString parses a single- or double-quoted string. -func (p *parser) parseString() (result string, err error) { - i := p.i - if len(p.s) < i+2 { - return "", errors.New("expected string, found EOF instead") - } - - quote := p.s[i] - i++ - -loop: - for i < len(p.s) { - switch p.s[i] { - case '\\': - if len(p.s) > i+1 { - switch c := p.s[i+1]; c { - case '\r': - if len(p.s) > i+2 && p.s[i+2] == '\n' { - i += 3 - continue loop - } - fallthrough - case '\n', '\f': - i += 2 - continue loop - } - } - p.i = i - val, err := p.parseEscape() - if err != nil { - return "", err - } - i = p.i - result += val - case quote: - break loop - case '\r', '\n', '\f': - return "", errors.New("unexpected end of line in string") - default: - start := i - for i < len(p.s) { - if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' { - break - } - i++ - } - result += p.s[start:i] - } - } - - if i >= len(p.s) { - return "", errors.New("EOF in string") - } - - // Consume the final quote. - i++ - - p.i = i - return result, nil -} - -// parseRegex parses a regular expression; the end is defined by encountering an -// unmatched closing ')' or ']' which is not consumed -func (p *parser) parseRegex() (rx *regexp.Regexp, err error) { - i := p.i - if len(p.s) < i+2 { - return nil, errors.New("expected regular expression, found EOF instead") - } - - // number of open parens or brackets; - // when it becomes negative, finished parsing regex - open := 0 - -loop: - for i < len(p.s) { - switch p.s[i] { - case '(', '[': - open++ - case ')', ']': - open-- - if open < 0 { - break loop - } - } - i++ - } - - if i >= len(p.s) { - return nil, errors.New("EOF in regular expression") - } - rx, err = regexp.Compile(p.s[p.i:i]) - p.i = i - return rx, err -} - -// skipWhitespace consumes whitespace characters and comments. -// It returns true if there was actually anything to skip. -func (p *parser) skipWhitespace() bool { - i := p.i - for i < len(p.s) { - switch p.s[i] { - case ' ', '\t', '\r', '\n', '\f': - i++ - continue - case '/': - if strings.HasPrefix(p.s[i:], "/*") { - end := strings.Index(p.s[i+len("/*"):], "*/") - if end != -1 { - i += end + len("/**/") - continue - } - } - } - break - } - - if i > p.i { - p.i = i - return true - } - - return false -} - -// consumeParenthesis consumes an opening parenthesis and any following -// whitespace. It returns true if there was actually a parenthesis to skip. -func (p *parser) consumeParenthesis() bool { - if p.i < len(p.s) && p.s[p.i] == '(' { - p.i++ - p.skipWhitespace() - return true - } - return false -} - -// consumeClosingParenthesis consumes a closing parenthesis and any preceding -// whitespace. It returns true if there was actually a parenthesis to skip. -func (p *parser) consumeClosingParenthesis() bool { - i := p.i - p.skipWhitespace() - if p.i < len(p.s) && p.s[p.i] == ')' { - p.i++ - return true - } - p.i = i - return false -} - -// parseTypeSelector parses a type selector (one that matches by tag name). 
-func (p *parser) parseTypeSelector() (result Selector, err error) { - tag, err := p.parseIdentifier() - if err != nil { - return nil, err - } - - return typeSelector(tag), nil -} - -// parseIDSelector parses a selector that matches by id attribute. -func (p *parser) parseIDSelector() (Selector, error) { - if p.i >= len(p.s) { - return nil, fmt.Errorf("expected id selector (#id), found EOF instead") - } - if p.s[p.i] != '#' { - return nil, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i]) - } - - p.i++ - id, err := p.parseName() - if err != nil { - return nil, err - } - - return attributeEqualsSelector("id", id), nil -} - -// parseClassSelector parses a selector that matches by class attribute. -func (p *parser) parseClassSelector() (Selector, error) { - if p.i >= len(p.s) { - return nil, fmt.Errorf("expected class selector (.class), found EOF instead") - } - if p.s[p.i] != '.' { - return nil, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i]) - } - - p.i++ - class, err := p.parseIdentifier() - if err != nil { - return nil, err - } - - return attributeIncludesSelector("class", class), nil -} - -// parseAttributeSelector parses a selector that matches by attribute value. -func (p *parser) parseAttributeSelector() (Selector, error) { - if p.i >= len(p.s) { - return nil, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead") - } - if p.s[p.i] != '[' { - return nil, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i]) - } - - p.i++ - p.skipWhitespace() - key, err := p.parseIdentifier() - if err != nil { - return nil, err - } - - p.skipWhitespace() - if p.i >= len(p.s) { - return nil, errors.New("unexpected EOF in attribute selector") - } - - if p.s[p.i] == ']' { - p.i++ - return attributeExistsSelector(key), nil - } - - if p.i+2 >= len(p.s) { - return nil, errors.New("unexpected EOF in attribute selector") - } - - op := p.s[p.i : p.i+2] - if op[0] == '=' { - op = "=" - } else if op[1] != '=' { - return nil, fmt.Errorf(`expected equality operator, found "%s" instead`, op) - } - p.i += len(op) - - p.skipWhitespace() - if p.i >= len(p.s) { - return nil, errors.New("unexpected EOF in attribute selector") - } - var val string - var rx *regexp.Regexp - if op == "#=" { - rx, err = p.parseRegex() - } else { - switch p.s[p.i] { - case '\'', '"': - val, err = p.parseString() - default: - val, err = p.parseIdentifier() - } - } - if err != nil { - return nil, err - } - - p.skipWhitespace() - if p.i >= len(p.s) { - return nil, errors.New("unexpected EOF in attribute selector") - } - if p.s[p.i] != ']' { - return nil, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i]) - } - p.i++ - - switch op { - case "=": - return attributeEqualsSelector(key, val), nil - case "!=": - return attributeNotEqualSelector(key, val), nil - case "~=": - return attributeIncludesSelector(key, val), nil - case "|=": - return attributeDashmatchSelector(key, val), nil - case "^=": - return attributePrefixSelector(key, val), nil - case "$=": - return attributeSuffixSelector(key, val), nil - case "*=": - return attributeSubstringSelector(key, val), nil - case "#=": - return attributeRegexSelector(key, rx), nil - } - - return nil, fmt.Errorf("attribute operator %q is not supported", op) -} - -var errExpectedParenthesis = errors.New("expected '(' but didn't find it") -var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it") -var errUnmatchedParenthesis = errors.New("unmatched '('") - -// 
parsePseudoclassSelector parses a pseudoclass selector like :not(p). -func (p *parser) parsePseudoclassSelector() (Selector, error) { - if p.i >= len(p.s) { - return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead") - } - if p.s[p.i] != ':' { - return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i]) - } - - p.i++ - name, err := p.parseIdentifier() - if err != nil { - return nil, err - } - name = toLowerASCII(name) - - switch name { - case "not", "has", "haschild": - if !p.consumeParenthesis() { - return nil, errExpectedParenthesis - } - sel, parseErr := p.parseSelectorGroup() - if parseErr != nil { - return nil, parseErr - } - if !p.consumeClosingParenthesis() { - return nil, errExpectedClosingParenthesis - } - - switch name { - case "not": - return negatedSelector(sel), nil - case "has": - return hasDescendantSelector(sel), nil - case "haschild": - return hasChildSelector(sel), nil - } - - case "contains", "containsown": - if !p.consumeParenthesis() { - return nil, errExpectedParenthesis - } - if p.i == len(p.s) { - return nil, errUnmatchedParenthesis - } - var val string - switch p.s[p.i] { - case '\'', '"': - val, err = p.parseString() - default: - val, err = p.parseIdentifier() - } - if err != nil { - return nil, err - } - val = strings.ToLower(val) - p.skipWhitespace() - if p.i >= len(p.s) { - return nil, errors.New("unexpected EOF in pseudo selector") - } - if !p.consumeClosingParenthesis() { - return nil, errExpectedClosingParenthesis - } - - switch name { - case "contains": - return textSubstrSelector(val), nil - case "containsown": - return ownTextSubstrSelector(val), nil - } - - case "matches", "matchesown": - if !p.consumeParenthesis() { - return nil, errExpectedParenthesis - } - rx, err := p.parseRegex() - if err != nil { - return nil, err - } - if p.i >= len(p.s) { - return nil, errors.New("unexpected EOF in pseudo selector") - } - if !p.consumeClosingParenthesis() { - return nil, errExpectedClosingParenthesis - } - - switch name { - case "matches": - return textRegexSelector(rx), nil - case "matchesown": - return ownTextRegexSelector(rx), nil - } - - case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type": - if !p.consumeParenthesis() { - return nil, errExpectedParenthesis - } - a, b, err := p.parseNth() - if err != nil { - return nil, err - } - if !p.consumeClosingParenthesis() { - return nil, errExpectedClosingParenthesis - } - if a == 0 { - switch name { - case "nth-child": - return simpleNthChildSelector(b, false), nil - case "nth-of-type": - return simpleNthChildSelector(b, true), nil - case "nth-last-child": - return simpleNthLastChildSelector(b, false), nil - case "nth-last-of-type": - return simpleNthLastChildSelector(b, true), nil - } - } - return nthChildSelector(a, b, - name == "nth-last-child" || name == "nth-last-of-type", - name == "nth-of-type" || name == "nth-last-of-type"), - nil - - case "first-child": - return simpleNthChildSelector(1, false), nil - case "last-child": - return simpleNthLastChildSelector(1, false), nil - case "first-of-type": - return simpleNthChildSelector(1, true), nil - case "last-of-type": - return simpleNthLastChildSelector(1, true), nil - case "only-child": - return onlyChildSelector(false), nil - case "only-of-type": - return onlyChildSelector(true), nil - case "input": - return inputSelector, nil - case "empty": - return emptyElementSelector, nil - case "root": - return rootSelector, nil - } - - return nil, fmt.Errorf("unknown pseudoclass :%s", 
name) -} - -// parseInteger parses a decimal integer. -func (p *parser) parseInteger() (int, error) { - i := p.i - start := i - for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' { - i++ - } - if i == start { - return 0, errors.New("expected integer, but didn't find it") - } - p.i = i - - val, err := strconv.Atoi(p.s[start:i]) - if err != nil { - return 0, err - } - - return val, nil -} - -// parseNth parses the argument for :nth-child (normally of the form an+b). -func (p *parser) parseNth() (a, b int, err error) { - // initial state - if p.i >= len(p.s) { - goto eof - } - switch p.s[p.i] { - case '-': - p.i++ - goto negativeA - case '+': - p.i++ - goto positiveA - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - goto positiveA - case 'n', 'N': - a = 1 - p.i++ - goto readN - case 'o', 'O', 'e', 'E': - id, nameErr := p.parseName() - if nameErr != nil { - return 0, 0, nameErr - } - id = toLowerASCII(id) - if id == "odd" { - return 2, 1, nil - } - if id == "even" { - return 2, 0, nil - } - return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id) - default: - goto invalid - } - -positiveA: - if p.i >= len(p.s) { - goto eof - } - switch p.s[p.i] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - a, err = p.parseInteger() - if err != nil { - return 0, 0, err - } - goto readA - case 'n', 'N': - a = 1 - p.i++ - goto readN - default: - goto invalid - } - -negativeA: - if p.i >= len(p.s) { - goto eof - } - switch p.s[p.i] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - a, err = p.parseInteger() - if err != nil { - return 0, 0, err - } - a = -a - goto readA - case 'n', 'N': - a = -1 - p.i++ - goto readN - default: - goto invalid - } - -readA: - if p.i >= len(p.s) { - goto eof - } - switch p.s[p.i] { - case 'n', 'N': - p.i++ - goto readN - default: - // The number we read as a is actually b. - return 0, a, nil - } - -readN: - p.skipWhitespace() - if p.i >= len(p.s) { - goto eof - } - switch p.s[p.i] { - case '+': - p.i++ - p.skipWhitespace() - b, err = p.parseInteger() - if err != nil { - return 0, 0, err - } - return a, b, nil - case '-': - p.i++ - p.skipWhitespace() - b, err = p.parseInteger() - if err != nil { - return 0, 0, err - } - return a, -b, nil - default: - return a, 0, nil - } - -eof: - return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b") - -invalid: - return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b") -} - -// parseSimpleSelectorSequence parses a selector sequence that applies to -// a single element. -func (p *parser) parseSimpleSelectorSequence() (Selector, error) { - var result Selector - - if p.i >= len(p.s) { - return nil, errors.New("expected selector, found EOF instead") - } - - switch p.s[p.i] { - case '*': - // It's the universal selector. Just skip over it, since it doesn't affect the meaning. - p.i++ - case '#', '.', '[', ':': - // There's no type selector. Wait to process the other till the main loop. 
- default: - r, err := p.parseTypeSelector() - if err != nil { - return nil, err - } - result = r - } - -loop: - for p.i < len(p.s) { - var ns Selector - var err error - switch p.s[p.i] { - case '#': - ns, err = p.parseIDSelector() - case '.': - ns, err = p.parseClassSelector() - case '[': - ns, err = p.parseAttributeSelector() - case ':': - ns, err = p.parsePseudoclassSelector() - default: - break loop - } - if err != nil { - return nil, err - } - if result == nil { - result = ns - } else { - result = intersectionSelector(result, ns) - } - } - - if result == nil { - result = func(n *html.Node) bool { - return n.Type == html.ElementNode - } - } - - return result, nil -} - -// parseSelector parses a selector that may include combinators. -func (p *parser) parseSelector() (result Selector, err error) { - p.skipWhitespace() - result, err = p.parseSimpleSelectorSequence() - if err != nil { - return - } - - for { - var combinator byte - if p.skipWhitespace() { - combinator = ' ' - } - if p.i >= len(p.s) { - return - } - - switch p.s[p.i] { - case '+', '>', '~': - combinator = p.s[p.i] - p.i++ - p.skipWhitespace() - case ',', ')': - // These characters can't begin a selector, but they can legally occur after one. - return - } - - if combinator == 0 { - return - } - - c, err := p.parseSimpleSelectorSequence() - if err != nil { - return nil, err - } - - switch combinator { - case ' ': - result = descendantSelector(result, c) - case '>': - result = childSelector(result, c) - case '+': - result = siblingSelector(result, c, true) - case '~': - result = siblingSelector(result, c, false) - } - } - - panic("unreachable") -} - -// parseSelectorGroup parses a group of selectors, separated by commas. -func (p *parser) parseSelectorGroup() (result Selector, err error) { - result, err = p.parseSelector() - if err != nil { - return - } - - for p.i < len(p.s) { - if p.s[p.i] != ',' { - return result, nil - } - p.i++ - c, err := p.parseSelector() - if err != nil { - return nil, err - } - result = unionSelector(result, c) - } - - return -} diff --git a/vendor/github.com/andybalholm/cascadia/selector.go b/vendor/github.com/andybalholm/cascadia/selector.go deleted file mode 100644 index 9fb05cc..0000000 --- a/vendor/github.com/andybalholm/cascadia/selector.go +++ /dev/null @@ -1,622 +0,0 @@ -package cascadia - -import ( - "bytes" - "fmt" - "regexp" - "strings" - - "golang.org/x/net/html" -) - -// the Selector type, and functions for creating them - -// A Selector is a function which tells whether a node matches or not. -type Selector func(*html.Node) bool - -// hasChildMatch returns whether n has any child that matches a. -func hasChildMatch(n *html.Node, a Selector) bool { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if a(c) { - return true - } - } - return false -} - -// hasDescendantMatch performs a depth-first search of n's descendants, -// testing whether any of them match a. It returns true as soon as a match is -// found, or false if no match is found. -func hasDescendantMatch(n *html.Node, a Selector) bool { - for c := n.FirstChild; c != nil; c = c.NextSibling { - if a(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) { - return true - } - } - return false -} - -// Compile parses a selector and returns, if successful, a Selector object -// that can be used to match against html.Node objects. 
-func Compile(sel string) (Selector, error) { - p := &parser{s: sel} - compiled, err := p.parseSelectorGroup() - if err != nil { - return nil, err - } - - if p.i < len(sel) { - return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i) - } - - return compiled, nil -} - -// MustCompile is like Compile, but panics instead of returning an error. -func MustCompile(sel string) Selector { - compiled, err := Compile(sel) - if err != nil { - panic(err) - } - return compiled -} - -// MatchAll returns a slice of the nodes that match the selector, -// from n and its children. -func (s Selector) MatchAll(n *html.Node) []*html.Node { - return s.matchAllInto(n, nil) -} - -func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node { - if s(n) { - storage = append(storage, n) - } - - for child := n.FirstChild; child != nil; child = child.NextSibling { - storage = s.matchAllInto(child, storage) - } - - return storage -} - -// Match returns true if the node matches the selector. -func (s Selector) Match(n *html.Node) bool { - return s(n) -} - -// MatchFirst returns the first node that matches s, from n and its children. -func (s Selector) MatchFirst(n *html.Node) *html.Node { - if s.Match(n) { - return n - } - - for c := n.FirstChild; c != nil; c = c.NextSibling { - m := s.MatchFirst(c) - if m != nil { - return m - } - } - return nil -} - -// Filter returns the nodes in nodes that match the selector. -func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) { - for _, n := range nodes { - if s(n) { - result = append(result, n) - } - } - return result -} - -// typeSelector returns a Selector that matches elements with a given tag name. -func typeSelector(tag string) Selector { - tag = toLowerASCII(tag) - return func(n *html.Node) bool { - return n.Type == html.ElementNode && n.Data == tag - } -} - -// toLowerASCII returns s with all ASCII capital letters lowercased. -func toLowerASCII(s string) string { - var b []byte - for i := 0; i < len(s); i++ { - if c := s[i]; 'A' <= c && c <= 'Z' { - if b == nil { - b = make([]byte, len(s)) - copy(b, s) - } - b[i] = s[i] + ('a' - 'A') - } - } - - if b == nil { - return s - } - - return string(b) -} - -// attributeSelector returns a Selector that matches elements -// where the attribute named key satisifes the function f. -func attributeSelector(key string, f func(string) bool) Selector { - key = toLowerASCII(key) - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - for _, a := range n.Attr { - if a.Key == key && f(a.Val) { - return true - } - } - return false - } -} - -// attributeExistsSelector returns a Selector that matches elements that have -// an attribute named key. -func attributeExistsSelector(key string) Selector { - return attributeSelector(key, func(string) bool { return true }) -} - -// attributeEqualsSelector returns a Selector that matches elements where -// the attribute named key has the value val. -func attributeEqualsSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - return s == val - }) -} - -// attributeNotEqualSelector returns a Selector that matches elements where -// the attribute named key does not have the value val. 
-func attributeNotEqualSelector(key, val string) Selector { - key = toLowerASCII(key) - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - for _, a := range n.Attr { - if a.Key == key && a.Val == val { - return false - } - } - return true - } -} - -// attributeIncludesSelector returns a Selector that matches elements where -// the attribute named key is a whitespace-separated list that includes val. -func attributeIncludesSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - for s != "" { - i := strings.IndexAny(s, " \t\r\n\f") - if i == -1 { - return s == val - } - if s[:i] == val { - return true - } - s = s[i+1:] - } - return false - }) -} - -// attributeDashmatchSelector returns a Selector that matches elements where -// the attribute named key equals val or starts with val plus a hyphen. -func attributeDashmatchSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - if s == val { - return true - } - if len(s) <= len(val) { - return false - } - if s[:len(val)] == val && s[len(val)] == '-' { - return true - } - return false - }) -} - -// attributePrefixSelector returns a Selector that matches elements where -// the attribute named key starts with val. -func attributePrefixSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - if strings.TrimSpace(s) == "" { - return false - } - return strings.HasPrefix(s, val) - }) -} - -// attributeSuffixSelector returns a Selector that matches elements where -// the attribute named key ends with val. -func attributeSuffixSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - if strings.TrimSpace(s) == "" { - return false - } - return strings.HasSuffix(s, val) - }) -} - -// attributeSubstringSelector returns a Selector that matches nodes where -// the attribute named key contains val. -func attributeSubstringSelector(key, val string) Selector { - return attributeSelector(key, - func(s string) bool { - if strings.TrimSpace(s) == "" { - return false - } - return strings.Contains(s, val) - }) -} - -// attributeRegexSelector returns a Selector that matches nodes where -// the attribute named key matches the regular expression rx -func attributeRegexSelector(key string, rx *regexp.Regexp) Selector { - return attributeSelector(key, - func(s string) bool { - return rx.MatchString(s) - }) -} - -// intersectionSelector returns a selector that matches nodes that match -// both a and b. -func intersectionSelector(a, b Selector) Selector { - return func(n *html.Node) bool { - return a(n) && b(n) - } -} - -// unionSelector returns a selector that matches elements that match -// either a or b. -func unionSelector(a, b Selector) Selector { - return func(n *html.Node) bool { - return a(n) || b(n) - } -} - -// negatedSelector returns a selector that matches elements that do not match a. -func negatedSelector(a Selector) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - return !a(n) - } -} - -// writeNodeText writes the text contained in n and its descendants to b. -func writeNodeText(n *html.Node, b *bytes.Buffer) { - switch n.Type { - case html.TextNode: - b.WriteString(n.Data) - case html.ElementNode: - for c := n.FirstChild; c != nil; c = c.NextSibling { - writeNodeText(c, b) - } - } -} - -// nodeText returns the text contained in n and its descendants. 
-func nodeText(n *html.Node) string { - var b bytes.Buffer - writeNodeText(n, &b) - return b.String() -} - -// nodeOwnText returns the contents of the text nodes that are direct -// children of n. -func nodeOwnText(n *html.Node) string { - var b bytes.Buffer - for c := n.FirstChild; c != nil; c = c.NextSibling { - if c.Type == html.TextNode { - b.WriteString(c.Data) - } - } - return b.String() -} - -// textSubstrSelector returns a selector that matches nodes that -// contain the given text. -func textSubstrSelector(val string) Selector { - return func(n *html.Node) bool { - text := strings.ToLower(nodeText(n)) - return strings.Contains(text, val) - } -} - -// ownTextSubstrSelector returns a selector that matches nodes that -// directly contain the given text -func ownTextSubstrSelector(val string) Selector { - return func(n *html.Node) bool { - text := strings.ToLower(nodeOwnText(n)) - return strings.Contains(text, val) - } -} - -// textRegexSelector returns a selector that matches nodes whose text matches -// the specified regular expression -func textRegexSelector(rx *regexp.Regexp) Selector { - return func(n *html.Node) bool { - return rx.MatchString(nodeText(n)) - } -} - -// ownTextRegexSelector returns a selector that matches nodes whose text -// directly matches the specified regular expression -func ownTextRegexSelector(rx *regexp.Regexp) Selector { - return func(n *html.Node) bool { - return rx.MatchString(nodeOwnText(n)) - } -} - -// hasChildSelector returns a selector that matches elements -// with a child that matches a. -func hasChildSelector(a Selector) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - return hasChildMatch(n, a) - } -} - -// hasDescendantSelector returns a selector that matches elements -// with any descendant that matches a. -func hasDescendantSelector(a Selector) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - return hasDescendantMatch(n, a) - } -} - -// nthChildSelector returns a selector that implements :nth-child(an+b). -// If last is true, implements :nth-last-child instead. -// If ofType is true, implements :nth-of-type instead. -func nthChildSelector(a, b int, last, ofType bool) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - - parent := n.Parent - if parent == nil { - return false - } - - if parent.Type == html.DocumentNode { - return false - } - - i := -1 - count := 0 - for c := parent.FirstChild; c != nil; c = c.NextSibling { - if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) { - continue - } - count++ - if c == n { - i = count - if !last { - break - } - } - } - - if i == -1 { - // This shouldn't happen, since n should always be one of its parent's children. - return false - } - - if last { - i = count - i + 1 - } - - i -= b - if a == 0 { - return i == 0 - } - - return i%a == 0 && i/a >= 0 - } -} - -// simpleNthChildSelector returns a selector that implements :nth-child(b). -// If ofType is true, implements :nth-of-type instead. 
-func simpleNthChildSelector(b int, ofType bool) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - - parent := n.Parent - if parent == nil { - return false - } - - if parent.Type == html.DocumentNode { - return false - } - - count := 0 - for c := parent.FirstChild; c != nil; c = c.NextSibling { - if c.Type != html.ElementNode || (ofType && c.Data != n.Data) { - continue - } - count++ - if c == n { - return count == b - } - if count >= b { - return false - } - } - return false - } -} - -// simpleNthLastChildSelector returns a selector that implements -// :nth-last-child(b). If ofType is true, implements :nth-last-of-type -// instead. -func simpleNthLastChildSelector(b int, ofType bool) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - - parent := n.Parent - if parent == nil { - return false - } - - if parent.Type == html.DocumentNode { - return false - } - - count := 0 - for c := parent.LastChild; c != nil; c = c.PrevSibling { - if c.Type != html.ElementNode || (ofType && c.Data != n.Data) { - continue - } - count++ - if c == n { - return count == b - } - if count >= b { - return false - } - } - return false - } -} - -// onlyChildSelector returns a selector that implements :only-child. -// If ofType is true, it implements :only-of-type instead. -func onlyChildSelector(ofType bool) Selector { - return func(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - - parent := n.Parent - if parent == nil { - return false - } - - if parent.Type == html.DocumentNode { - return false - } - - count := 0 - for c := parent.FirstChild; c != nil; c = c.NextSibling { - if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) { - continue - } - count++ - if count > 1 { - return false - } - } - - return count == 1 - } -} - -// inputSelector is a Selector that matches input, select, textarea and button elements. -func inputSelector(n *html.Node) bool { - return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button") -} - -// emptyElementSelector is a Selector that matches empty elements. -func emptyElementSelector(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - - for c := n.FirstChild; c != nil; c = c.NextSibling { - switch c.Type { - case html.ElementNode, html.TextNode: - return false - } - } - - return true -} - -// descendantSelector returns a Selector that matches an element if -// it matches d and has an ancestor that matches a. -func descendantSelector(a, d Selector) Selector { - return func(n *html.Node) bool { - if !d(n) { - return false - } - - for p := n.Parent; p != nil; p = p.Parent { - if a(p) { - return true - } - } - - return false - } -} - -// childSelector returns a Selector that matches an element if -// it matches d and its parent matches a. -func childSelector(a, d Selector) Selector { - return func(n *html.Node) bool { - return d(n) && n.Parent != nil && a(n.Parent) - } -} - -// siblingSelector returns a Selector that matches an element -// if it matches s2 and in is preceded by an element that matches s1. -// If adjacent is true, the sibling must be immediately before the element. 
-func siblingSelector(s1, s2 Selector, adjacent bool) Selector { - return func(n *html.Node) bool { - if !s2(n) { - return false - } - - if adjacent { - for n = n.PrevSibling; n != nil; n = n.PrevSibling { - if n.Type == html.TextNode || n.Type == html.CommentNode { - continue - } - return s1(n) - } - return false - } - - // Walk backwards looking for element that matches s1 - for c := n.PrevSibling; c != nil; c = c.PrevSibling { - if s1(c) { - return true - } - } - - return false - } -} - -// rootSelector implements :root -func rootSelector(n *html.Node) bool { - if n.Type != html.ElementNode { - return false - } - if n.Parent == nil { - return false - } - return n.Parent.Type == html.DocumentNode -} diff --git a/vendor/github.com/baidubce/bce-sdk-go/LICENSE b/vendor/github.com/baidubce/bce-sdk-go/LICENSE deleted file mode 100644 index f433b1a..0000000 --- a/vendor/github.com/baidubce/bce-sdk-go/LICENSE +++ /dev/null @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/baidubce/bce-sdk-go/README.md b/vendor/github.com/baidubce/bce-sdk-go/README.md deleted file mode 100644 index 7594896..0000000 --- a/vendor/github.com/baidubce/bce-sdk-go/README.md +++ /dev/null @@ -1,1589 +0,0 @@ -# GO SDK 文档 - -# 概述 - -本文档主要介绍BOS GO SDK的安装和使用。在使用本文档前,您需要先了解BOS的一些基本知识,并已开通了BOS服务。若您还不了解BOS,可以参考[产品描述](https://cloud.baidu.com/doc/BOS/ProductDescription.html)和[入门指南](https://cloud.baidu.com/doc/BOS/GettingStarted-new.html)。 - -# 安装SDK工具包 - -## 运行环境 - -GO SDK可以在go1.3及以上环境下运行。 - -## 安装SDK - -**直接从github下载** - -使用`go get`工具从github进行下载: - -```shell -go get github.com/baidubce/bce-sdk-go -``` - -**SDK目录结构** - -```text -bce-sdk-go -|--auth //BCE签名和权限认证 -|--bce //BCE公用基础组件 -|--http //BCE的http通信模块 -|--services //BCE相关服务目录 -| |--bos //BOS服务目录 -| | |--bos_client.go //BOS客户端入口 -| | |--api //BOS相关API目录 -| | |--bucket.go //BOS的Bucket相关API实现 -| | |--object.go //BOS的Object相关API实现 -| | |--multipart.go //BOS的Multipart相关API实现 -| | |--module.go //BOS相关API的数据模型 -| | |--util.go //BOS相关API实现使用的工具 -| |--sts //STS服务目录 -|--util //BCE公用的工具实现 -``` - -## 卸载SDK - -预期卸载SDK时,删除下载的源码即可。 - - -# 初始化 - -## 确认Endpoint - -在确认您使用SDK时配置的Endpoint时,可先阅读开发人员指南中关于[BOS访问域名](https://cloud.baidu.com/doc/BOS/DevRef.html#BOS.E8.AE.BF.E9.97.AE.E5.9F.9F.E5.90.8D)的部分,理解Endpoint相关的概念。百度云目前开放了多区域支持,请参考[区域选择说明](https://cloud.baidu.com/doc/Reference/Regions.html)。 - -目前支持“华北-北京”、“华南-广州”和“华东-苏州”三个区域。北京区域:`http://bj.bcebos.com`,广州区域:`http://gz.bcebos.com`,苏州区域:`http://su.bcebos.com`。对应信息为: - -访问区域 | 对应Endpoint ----|--- -BJ | bj.bcebos.com -GZ | gz.bcebos.com -SU | su.bcebos.com - -## 获取密钥 - -要使用百度云BOS,您需要拥有一个有效的AK(Access Key ID)和SK(Secret Access Key)用来进行签名认证。AK/SK是由系统分配给用户的,均为字符串,用于标识用户,为访问BOS做签名验证。 - -可以通过如下步骤获得并了解您的AK/SK信息: - -[注册百度云账号](https://login.bce.baidu.com/reg.html?tpl=bceplat&from=portal) - -[创建AK/SK](https://console.bce.baidu.com/iam/?_=1513940574695#/iam/accesslist) - -## 新建BOS Client - -BOS Client是BOS服务的客户端,为开发者与BOS服务进行交互提供了一系列的方法。 - -### 使用AK/SK新建BOS Client - -通过AK/SK方式访问BOS,用户可以参考如下代码新建一个BOS Client: - -```go -import ( - "github.com/baidubce/bce-sdk-go/services/bos" -) - -func main() { - // 用户的Access Key ID和Secret Access Key - ACCESS_KEY_ID, SECRET_ACCESS_KEY := , - - // 用户指定的Endpoint - ENDPOINT := - - // 初始化一个BosClient - bosClient, err := bos.NewClient(AK, SK, ENDPOINT) -} -``` - -在上面代码中,`ACCESS_KEY_ID`对应控制台中的“Access Key ID”,`SECRET_ACCESS_KEY`对应控制台中的“Access Key Secret”,获取方式请参考《操作指南 [管理ACCESSKEY](https://cloud.baidu.com/doc/BOS/GettingStarted.html#.E7.AE.A1.E7.90.86ACCESSKEY)》。第三个参数`ENDPOINT`支持用户自己指定域名,如果设置为空字符串,会使用默认域名作为BOS的服务地址。 - -> **注意:**`ENDPOINT`参数需要用指定区域的域名来进行定义,如服务所在区域为北京,则为`http://bj.bcebos.com`。 - -### 使用STS创建BOS Client - -**申请STS token** - -BOS可以通过STS机制实现第三方的临时授权访问。STS(Security Token Service)是百度云提供的临时授权服务。通过STS,您可以为第三方用户颁发一个自定义时效和权限的访问凭证。第三方用户可以使用该访问凭证直接调用百度云的API或SDK访问百度云资源。 - -通过STS方式访问BOS,用户需要先通过STS的client申请一个认证字符串,申请方式可参见[百度云STS使用介绍](https://cloud.baidu.com/doc/BOS/API.html#STS.E7.AE.80.E4.BB.8B)。 - -**用STS token新建BOS Client** - -申请好STS后,可将STS Token配置到BOS Client中,从而实现通过STS Token创建BOS Client。 - -**代码示例** - -GO SDK实现了STS服务的接口,用户可以参考如下完整代码,实现申请STS Token和创建BOS Client对象: - -```go -import ( - "fmt" - - "github.com/baidubce/bce-sdk-go/auth" //导入认证模块 - "github.com/baidubce/bce-sdk-go/services/bos" //导入BOS服务模块 - "github.com/baidubce/bce-sdk-go/services/sts" //导入STS服务模块 -) - -func main() { - // 创建STS服务的Client对象,Endpoint使用默认值 - AK, SK := , - stsClient, err := sts.NewClient(AK, SK) - if err != nil { - fmt.Println("create sts client object :", err) - 
return - } - - // 获取临时认证token,有效期为60秒,ACL为空 - sts, err := stsClient.GetSessionToken(60, "") - if err != nil { - fmt.Println("get session token failed:", err) - return - } - fmt.Println("GetSessionToken result:") - fmt.Println(" accessKeyId:", sts.AccessKeyId) - fmt.Println(" secretAccessKey:", sts.SecretAccessKey) - fmt.Println(" sessionToken:", sts.SessionToken) - fmt.Println(" createTime:", sts.CreateTime) - fmt.Println(" expiration:", sts.Expiration) - fmt.Println(" userId:", sts.UserId) - - // 使用申请的临时STS创建BOS服务的Client对象,Endpoint使用默认值 - bosClient, err := bos.NewClient(sts.AccessKeyId, sts.SecretAccessKey, "") - if err != nil { - fmt.Println("create bos client failed:", err) - return - } - stsCredential, err := auth.NewSessionBceCredentials( - sts.AccessKeyId, - sts.SecretAccessKey, - sts.SessionToken) - if err != nil { - fmt.Println("create sts credential object failed:", err) - return - } - bosClient.Config.Credentials = stsCredential -} -``` - -> 注意: -> 目前使用STS配置BOS Client时,无论对应BOS服务的Endpoint在哪里,STS的Endpoint都需配置为http://sts.bj.baidubce.com。上述代码中创建STS对象时使用此默认值。 - -## 配置HTTPS协议访问BOS - -BOS支持HTTPS传输协议,您可以通过在创建BOS Client对象时指定的Endpoint中指明HTTPS的方式,在BOS GO SDK中使用HTTPS访问BOS服务: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos" - -ENDPOINT := "https://bj.bcebos.com" //指明使用HTTPS协议 -AK, SK := , -bosClient, _ := bos.NewClient(AK, SK, ENDPOINT) -``` - -## 配置BOS Client - -如果用户需要配置BOS Client的一些细节的参数,可以在创建BOS Client对象之后,使用该对象的导出字段`Config`进行自定义配置,可以为客户端配置代理,最大连接数等参数。 - -### 使用代理 - -下面一段代码可以让客户端使用代理访问BOS服务: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos" - -//创建BOS Client对象 -AK, SK := , -ENDPOINT := "bj.bcebos.com" -client, _ := bos.NewClient(AK, SK, ENDPOINT) - -//代理使用本地的8080端口 -client.Config.ProxyUrl = "127.0.0.1:8080" -``` - -### 设置网络参数 - -用户可以通过如下的示例代码进行网络参数的设置: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos" - -AK, SK := , -ENDPOINT := "bj.bcebos.com" -client, _ := bos.NewClient(AK, SK, ENDPOINT) - -// 配置不进行重试,默认为Back Off重试 -client.Config.Retry = bce.NewNoRetryPolicy() - -// 配置连接超时时间为30秒 -client.Config.ConnectionTimeoutInMillis = 30 * 1000 -``` - -### 配置生成签名字符串选项 - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos" - -AK, SK := , -ENDPOINT := "bj.bcebos.com" -client, _ := bos.NewClient(AK, SK, ENDPOINT) - -// 配置签名使用的HTTP请求头为`Host` -headersToSign := map[string]struct{}{"Host": struct{}{}} -client.Config.SignOption.HeadersToSign = HeadersToSign - -// 配置签名的有效期为30秒 -client.Config.SignOption.ExpireSeconds = 30 -``` - -**参数说明** - -用户使用GO SDK访问BOS时,创建的BOS Client对象的`Config`字段支持的所有参数如下表所示: - -配置项名称 | 类型 | 含义 ------------|---------|-------- -Endpoint | string | 请求服务的域名 -ProxyUrl | string | 客户端请求的代理地址 -Region | string | 请求资源的区域 -UserAgent | string | 用户名称,HTTP请求的User-Agent头 -Credentials| \*auth.BceCredentials | 请求的鉴权对象,分为普通AK/SK与STS两种 -SignOption | \*auth.SignOptions | 认证字符串签名选项 -Retry | RetryPolicy | 连接重试策略 -ConnectionTimeoutInMillis| int | 连接超时时间,单位毫秒,默认20分钟 - -说明: - - 1. `Credentials`字段使用`auth.NewBceCredentials`与`auth.NewSessionBceCredentials`函数创建,默认使用前者,后者为使用STS鉴权时使用,详见“使用STS创建BOS Client”小节。 - 2. `SignOption`字段为生成签名字符串时的选项,详见下表说明: - -名称 | 类型 | 含义 ---------------|-------|----------- -HeadersToSign |map[string]struct{} | 生成签名字符串时使用的HTTP头 -Timestamp | int64 | 生成的签名字符串中使用的时间戳,默认使用请求发送时的值 -ExpireSeconds | int | 签名字符串的有效期 - - 其中,HeadersToSign默认为`Host`,`Content-Type`,`Content-Length`,`Content-MD5`;TimeStamp一般为零值,表示使用调用生成认证字符串时的时间戳,用户一般不应该明确指定该字段的值;ExpireSeconds默认为1800秒即30分钟。 - 3. 
`Retry`字段指定重试策略,目前支持两种:`NoRetryPolicy`和`BackOffRetryPolicy`。默认使用后者,该重试策略是指定最大重试次数、最长重试时间和重试基数,按照重试基数乘以2的指数级增长的方式进行重试,直到达到最大重试测试或者最长重试时间为止。 - - -# Bucket管理 - -Bucket既是BOS上的命名空间,也是计费、权限控制、日志记录等高级功能的管理实体。 - -- Bucket名称在所有区域中具有全局唯一性,且不能修改。 - -> **说明:** -> -> 百度云目前开放了多区域支持,请参考[区域选择说明](https://cloud.baidu.com/doc/Reference/Regions.html)。 -> 目前支持“华北-北京”、“华南-广州”和“华东-苏州”三个区域。北京区域:`http://bj.bcebos.com`,广州区域:`http://gz.bcebos.com`,苏州区域:`http://su.bcebos.com`。 - -- 存储在BOS上的每个Object都必须包含在一个Bucket中。 -- 一个用户最多可创建100个Bucket,但每个Bucket中存放的Object的数量和大小总和没有限制,用户不需要考虑数据的可扩展性。 - -## Bucket权限管理 - -### 设置Bucket的访问权限 - -如下代码将Bucket的权限设置为了private。 - -```go -err := bosClient.PutBucketAclFromCanned(bucketName, "private") -``` - -用户可设置的CannedACL包含三个值:`private` 、`public-read` 、`public-read-write`,它们分别对应相关权限。具体内容可以参考BOS API文档 [使用CannedAcl方式的权限控制](https://cloud.baidu.com/doc/BOS/API.html#.4F.FA.21.55.58.27.F8.31.85.2D.01.55.89.10.A7.16)。 - -### 设置指定用户对Bucket的访问权限 - -BOS还可以实现设置指定用户对Bucket的访问权限,参考如下代码实现: - -```go -// import "github.com/baidubce/bce-sdk-go/bce" -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -// 1. 直接上传ACL文件流 -aclBodyStream := bce.NewBodyFromFile("") -err := bosClient.PutBucketAcl(bucket, aclBodyStream) - -// 2. 直接使用ACL json字符串 -aclString := `{ - "accessControlList":[ - { - "grantee":[{ - "id":"e13b12d0131b4c8bae959df4969387b8" //指定用户ID - }], - "permission":["FULL_CONTROL"] //指定用户权限 - } - ] -}` -err := bosClient.PutBucketAclFromString(bucket, aclString) - -// 3. 使用ACL文件 -err := bosClient.PutBucketAclFromFile(bucket, "") - -// 4. 使用ACL struct对象设置 -grantUser1 := api.GranteeType{""} -grantUser2 := api.GranteeType{""} -grant1 := api.GrantType{ - Grantee: []api.GranteeType{grantUser1}, - Permission: []string{"FULL_CONTROL"} -} -grant2 := api.GrantType{ - Grantee: []api.GranteeType{granteUser2}, - Permission: []string{"READ"} -} -grantArr := make([]api.GranteType) -grantArr = append(grantArr, grant1) -grantArr = append(grantArr, grant2) -args := &api.PutBucketAclArgs{grantArr} -err := bosClient.PutBucketAclFromStruct(bucketName, args) -``` - -> **注意:** -> Permission中的权限设置包含三个值:`READ`、`WRITE`、`FULL_CONTROL`,它们分别对应相关权限。具体内容可以参考BOS API文档 [上传ACL文件方式的权限控制](https://cloud.baidu.com/doc/BOS/API.html#.D4.56.61.2C.A5.B1.68.B6.42.32.3E.18.15.BD.CE.43)。 -> ACL规则比较复杂,直接编辑ACL的文件或JSON字符串比较困难,因此提供了第四种方式方便使用代码创建ACL规则。 - -### 设置更多Bucket访问权限 - -1. 通过设置referer白名单方式设置防盗链 - -```go -aclString := `{ - "accessControlList":[ - { - "grantee":[{"id":"*"]}, //指定用户ID为全部用户 - "permission":["FULL_CONTROL"], //指定用户权限 - "condition":[{"referer": {"stringEquals": "http://allowed-domain/"}}] - } - ] -}` -err := bosClient.PutBucketAclFromString(bucket, aclString) -``` - -2. 
限制客户端IP访问,只允许部分客户端IP访问 - -```go -aclString := `{ - "accessControlList":[ - { - "grantee":[{"id":"*"]}, //指定用户ID为全部用户 - "permission":["READ"], //指定用户权限 - "condition":[{"ipAddress": ["ip-1", "ip-2"]}] - } - ] -}` -err := bosClient.PutBucketAclFromString(bucket, aclString) -``` - -### 设置STS临时token权限 - -对于通过STS方式创建的临时访问身份,管理员也可进行专门的权限设定。 - -STS的简介及设置临时权限的方式可参见[临时授权访问](https://cloud.baidu.com/doc/BOS/API.html#.E4.B8.B4.E6.97.B6.E6.8E.88.E6.9D.83.E8.AE.BF.E9.97.AE)。 - -使用BOS GO SDK设置STS临时token权限可参考如下示例: - -```go -// import "github.com/baidubce/bce-sdk-go/services/sts" - -AK, SK := , -stsClient, err := sts.NewClient(AK, SK) -aclString := `{ - "accessControlList":[ - { - "grantee":[{"id":"*"]}, //指定用户ID为全部用户 - "permission":["FULL_CONTROL"], //指定用户权限 - "condition":[{"referer": {"stringEquals": "http://allowed-domain/"}}] - } - ] -}` -//使用有效期为300秒且指定ACL的方式获取临时STS token -sts, err := stsClient.GetSessionToken(300, aclString) -``` - -## 查看Bucket所属的区域 - -Bucket Location即Bucket Region,百度云支持的各region详细信息可参见[区域选择说明](https://cloud.baidu.com/doc/Reference/Regions.html)。 - -如下代码可以获取该Bucket的Location信息: - -```go -location, err := bosClient.GetBucketLocation(bucketName) -``` - -## 新建Bucket - -如下代码可以新建一个Bucket: - -```go -// 新建Bucket的接口为PutBucket,需指定Bucket名称 -if loc, err := bosClient.PutBucket(); err != nil { - fmt.Println("create bucket failed:", err) -} else { - fmt.Println("create bucket success at location:", loc) -} -``` - -> **注意:** 由于Bucket的名称在所有区域中是唯一的,所以需要保证bucketName不与其他所有区域上的Bucket名称相同。 -> -> Bucket的命名有以下规范: -> - 只能包括小写字母,数字,短横线(-)。 -> - 必须以小写字母或者数字开头。 -> - 长度必须在3-63字节之间。 - -## 列举Bucket - -如下代码可以列出用户所有的Bucket: - -```go -if res, err := bosClient.ListBuckets(); err != nil { - fmt.Println("list buckets failed:", err) -} else { - fmt.Println("owner:", res.Owner) - for i, b := range res.Buckets { - fmt.Println("bucket", i) - fmt.Println(" Name:", b.Name) - fmt.Println(" Location:", b.Location) - fmt.Println(" CreationDate:", b.CreationDate) - } -} -``` - -## 删除Bucket - -如下代码可以删除一个Bucket: - -```go -err := bosClient.DeleteBucket(bucketName) -``` - -> **注意:** -> - 在删除前需要保证此Bucket下的所有Object已经已被删除,否则会删除失败。 -> - 在删除前确认该Bucket没有开通跨区域复制,不是跨区域复制规则中的源Bucket或目标Bucket,否则不能删除。 - -## 判断Bucket是否存在 - -若用户需要判断某个Bucket是否存在,则如下代码可以做到: - -```go -exists, err := bosClient.DoesBucketExist(bucketName) -if err == nil && exists { - fmt.Println("Bucket exists") -} else { - fmt.Println("Bucket not exists") -} -``` - - -> **注意:** -> 如果Bucket不为空(即Bucket中有Object存在),则Bucket无法被删除,必须清空Bucket后才能成功删除。 - - -# 文件管理 - -## 上传文件 - -在BOS中,用户操作的基本数据单元是Object。Object包含Key、Meta和Data。其中,Key是Object的名字;Meta是用户对该Object的描述,由一系列Name-Value对组成;Data是Object的数据。 - -BOS GO SDK提供了丰富的文件上传接口,可以通过以下方式上传文件: - -- 简单上传 -- 追加上传 -- 抓取上传 -- 分块上传 - -### 简单上传 - -BOS在简单上传的场景中,支持以指定文件形式、以数据流方式、以二进制串方式、以字符串方式执行Object上传,请参考如下代码: - -```go -// import "github.com/baidubce/bce-sdk-go/bce" - -// 从本地文件上传 -etag, err := bosClient.PutObjectFromFile(bucketName, objectName, fileName, nil) - -// 从字符串上传 -str := "test put object" -etag, err := bosClient.PutObjectFromString(bucketName, objectName, str, nil) - -// 从字节数组上传 -byteArr := []byte("test put object") -etag, err := bosClient.PutObjectFromBytes(bucketName, objectName, byteArr, nil) - -// 从数据流上传 -bodyStream, err := bce.NewBodyFromFile(fileName) -etag, err := bosClient.PutObject(bucketName, objectName, bodyStream, nil) - -// 使用基本接口,提供必需参数从数据流上传 -bodyStream, err := bce.NewBodyFromFile(fileName) -etag, err := bosClient.BasicPutObject(bucketName, objectName, bodyStream) -``` - 
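补充一个带错误处理的简单上传示意(仅作参考:沿用上文示例中已创建的 bosClient,bucketName、objectName、fileName 均为占位变量,写法并非SDK唯一用法):

```go
// 示意用法:上传本地文件并检查返回的ETag(bucketName、objectName、fileName为占位变量)
etag, err := bosClient.PutObjectFromFile(bucketName, objectName, fileName, nil)
if err != nil {
	fmt.Println("put object failed:", err)
} else {
	fmt.Println("put object success, ETag:", etag)
}
```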
-Object以文件的形式上传到BOS中,上述简单上传的接口支持不超过5GB的Object上传。在请求处理成功后,BOS会在Header中返回Object的ETag作为文件标识。 - -**设置文件元信息** - -文件元信息(Object Meta),是对用户在向BOS上传文件时,同时对文件进行的属性描述,主要分为分为两种:设置HTTP标准属性(HTTP Headers)和用户自定义的元信息。 - -***设定Object的Http Header*** - -BOS GO SDK本质上是调用后台的HTTP接口,因此用户可以在上传文件时自定义Object的Http Header。常用的http header说明如下: - -名称 | 描述 |默认值 ----|---|--- -Content-MD5 | 文件数据校验,设置后BOS会启用文件内容MD5校验,把您提供的MD5与文件的MD5比较,不一致会抛出错误 | 有 -Content-Type | 文件的MIME,定义文件的类型及网页编码,决定浏览器将以什么形式、什么编码读取文件。如没有指定,BOS则根据文件的扩展名自动生成,如文件没有扩展名则填默认值 | application/octet-stream -Content-Disposition | 指示MIME用户代理如何显示附加的文件,打开或下载,及文件名称 | 无 -Content-Length | 上传的文件的长度,超过流/文件的长度会截断,不足为实际值 | 流/文件的长度 -Expires| 缓存过期时间 | 无 -Cache-Control | 指定该Object被下载时的网页的缓存行为 | 无 - -参考代码如下: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.PutObjectArgs) - -// 设置上传内容的MIME类型 -args.ContentType = "text/javascript" - -// 设置上传内容的长度 -args.ContentLength = 1024 - -// 设置缓存过期时间 -args.Expires = "Mon, 19 Mar 2018 11:55:32 GMT" - -// 设置缓存行为 -args.CacheControl = "max-age=3600" - -etag, err := bosClient.PutObject(bucketName, objectName, bodyStream, args) -``` - -> 注意:用户上传对象时SDK会自动设置ContentLength和ContentMD5,用来保证数据的正确性。如果用户自行设定ContentLength,必须为大于等于0且小于等于实际对象大小的数值,从而上传截断部分的内容,为负数或大于实际大小均报错。 - -***用户自定义元信息*** - -BOS支持用户自定义元数据来对Object进行描述。如下代码所示: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.PutObjectArgs) - -// 设置用户自定义元数据 -args.UserMeta = map[string]string{ - "name1": "my-metadata1", - "name2": "my-metadata2", -} - -etag, err := bosClient.PutObject(bucketName, objectName, bodyStream, args) -``` - -> **提示:** -> - 在上面代码中,用户自定义了一个名字为“name1”和“name2”,值分别为“my-metadata1”和“my-metadata2”的元数据 -> - 当用户下载此Object的时候,此元数据也可以一并得到 -> - 一个Object可以有多个类似的参数,但所有的User Meta总大小不能超过2KB - -**上传Object时设置存储类型** - -BOS支持标准存储、低频存储和冷存储,上传Object并存储为低频存储类型通过指定StorageClass实现,三种存储类型对应的参数如下: - -存储类型 | 参数 ----|--- -标准存储 | STANDRAD -低频存储 | STANDARD_IA -冷存储 | COLD - -以低频存储为例,代码如下: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.PutObjectArgs) -args.StorageClass = api.STORAGE_CLASS_STANDARD_IA -etag, err := bosClient.PutObject(bucketName, objectName, bodyStream, args) -``` - -### 追加上传 - -上文介绍的简单上传方式,创建的Object都是Normal类型,用户不可再进行追加写,这在日志、视频监控、视频直播等数据复写较频繁的场景中使用不方便。 - -正因如此,百度云BOS特别支持了AppendObject,即以追加写的方式上传文件。通过AppendObject操作创建的Object类型为Appendable Object,可以对该Object追加数据。AppendObject大小限制为0~5G。当您的网络情况较差时,推荐使用AppendObject的方式进行上传,每次追加较小数据(如256kb)。 - -通过AppendObject方式上传示例代码如下: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.AppendObjectArgs) - -// 1. 原始接口上传,设置为低频存储,设置追加的偏移位置 -args.StorageClass = api.STORAGE_CLASS_STANDARD_IA -args.Offset = 1024 -res, err := bosClient.AppendObject(bucketName, objectName, bodyStream, args) - -// 2. 封装的简单接口,仅支持设置offset -res, err := bosClient.SimpleAppendObject(bucketName, objectName, bodyStream, offset) - -// 3. 封装的从字符串上传接口,仅支持设置offset -res, err := bosClient.SimpleAppendObjectFromString(bucketName, objectName, "abc", offset) - -// 4. 封装的从给出的文件名上传文件的接口,仅支持设置offset -res, err := bosClient.SimpleAppendObjectFromFile(bucketName, objectName, "", offset) - -fmt.Println(res.ETag) // 打印ETag -fmt.Println(res.ContentMD5) // 打印ContentMD5 -fmt.Println(res.NextAppendOffset) // 打印NextAppendOffset -``` - -### 抓取上传 - -BOS支持用户提供的url自动抓取相关内容并保存为指定Bucket的指定名称的Object。 - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.FetchObjectArgs) - -// 1. 
原始接口抓取,设置为异步抓取模式 -args.FetchMode = api.FETCH_MODE_ASYNC -res, err := bosClient.FetchObject(bucket, object, url, args) - -// 2. 基本抓取接口,默认为同步抓取模式 -res, err := bosClient.BasicFetchObject(bucket, object, url) - -// 3. 易用接口,直接指定可选参数 -res, err := bosClient.SimpleFetchObject(bucket, object, url, - api.FETCH_MODE_ASYNC, api.STORAGE_CLASS_STANDARD_IA) - -fmt.Println(res.ETag) // 打印ETag -``` - -### 分块上传 - -除了通过简单上传几追加上传方式将文上传件到BOS以外,BOS还提供了另外一种上传模式 —— Multipart Upload。用户可以在如下的应用场景内(但不仅限于此),使用Multipart Upload上传模式,如: - -- 需要支持断点上传。 -- 上传超过5GB大小的文件。 -- 网络条件较差,和BOS的服务器之间的连接经常断开。 -- 需要流式地上传文件。 -- 上传文件之前,无法确定上传文件的大小。 - -BOS GO SDK提供了分块操作的控制参数: - -- MultipartSize:每个分块的大小,默认为10MB,最小不得低于5MB -- MaxParallel:分块操作的并发数,默认为10 - -下面的示例代码设置了分块的大小为20MB,并发数为100: - -``` -// import "github.com/baidubce/bce-sdk-go/services/bos" - -client := bos.NewClient(, , ) -client.MultipartSize = 20 * (1 << 10) -client.MaxParallel = 100 -``` - -除了上述参数外,还会对设置的每个分块数进行1MB对齐,同时限制是最大分块数目不得超过10000,如果分块较小导致分块数超过这个上限会自动调整分块大小。 - -下面将一步步介绍Multipart Upload的实现。假设有一个文件,本地路径为 `/path/to/file.zip`,由于文件比较大,将其分块传输到BOS中。 - -**初始化Multipart Upload** - -使用`BasicInitiateMultipartUpload`方法来初始化一个基本的分块上传事件: - -```go -res, err := bosClient.BasicInitiateMultipartUpload(bucketName, objectKey) -fmt.Println(res.UploadId) // 打印初始化分块上传后获取的UploadId -``` - -返回结果中含有 `UploadId` ,它是区分分块上传事件的唯一标识,在后面的操作中,我们将用到它。 - -***上传低频存储类型Object的初始化*** - -BOS GO SDK提供的`InitiateMultipartUpload`接口可以设置其他分块上传的相关参数,下面的代码初始化了低频存储的一个分块上传事件: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.InitiateMultipartUploadArgs) -args.StorageClass = api.STORAGE_CLASS_STANDARD_IA -res, err := bosClient.InitiateMultipartUpload(bucketName, objectKey, contentType, args) -fmt.Println(res.UploadId) // 打印初始化分块上传后获取的UploadId -``` - -***上传冷存储类型Object的初始化*** - -初始化低频存储的一个分块上传事件: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.InitiateMultipartUploadArgs) -args.StorageClass = api.STORAGE_CLASS_COLD -res, err := bosClient.InitiateMultipartUpload(bucketName, objectKey, contentType, args) -fmt.Println(res.UploadId) // 打印初始化分块上传后获取的UploadId -``` - -**上传分块** - -接着,把文件分块上传。 - -```go -// import "github.com/baidubce/bce-sdk-go/bce" -// import "github.com/baidubce/bce-sdk-go/services/bos" -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -file, _ := os.Open("/path/to/file.zip") - -// 分块大小按MULTIPART_ALIGN=1MB对齐 -partSize := (bosClient.MultipartSize + - bos.MULTIPART_ALIGN - 1) / bos.MULTIPART_ALIGN * bos.MULTIPART_ALIGN - -// 获取文件大小,并计算分块数目,最大分块数MAX_PART_NUMBER=10000 -fileInfo, _ := file.Stat() -fileSize := fileInfo.Size() -partNum := (fileSize + partSize - 1) / partSize -if partNum > bos.MAX_PART_NUMBER { // 超过最大分块数,需调整分块大小 - partSize = (fileSize + bos.MAX_PART_NUMBER + 1) / bos.MAX_PART_NUMBER - partSize = (partSize + bos.MULTIPART_ALIGN - 1) / bos.MULTIPART_ALIGN * bos.MULTIPART_ALIGN - partNum = (fileSize + partSize - 1) / partSize -} - -// 创建保存每个分块上传后的ETag和PartNumber信息的列表 -partEtags := make([]api.UploadInfoType) - -// 逐个分块上传 -for i := int64(1); i <= partNum; i++ { - // 计算偏移offset和本次上传的大小uploadSize - uploadSize := partSize - offset := partSize * (i - 1) - left := fileSize - offset - if left < partSize { - uploadSize = left - } - - // 创建指定偏移、指定大小的文件流 - partBody, _ := bce.NewBodyFromSectionFile(file, offset, uploadSize) - - // 上传当前分块 - etag, err := bosClient.BasicUploadPart(bucketName, objectKey, uploadId, int(i), partBody) - - // 保存当前分块上传成功后返回的序号和ETag - partEtags = append(partEtags, api.UploadInfoType{int(partNum), etag}) -} 
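
// 补充说明(示意):循环结束后,partEtags 即为传给下一步
// CompleteMultipartUploadFromStruct 的分块信息列表;实际使用时还应检查每次
// BasicUploadPart 返回的 err,并在全部分块上传完成后关闭 file(如 defer file.Close())。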
-``` - -上面代码的核心是调用 `BasicUploadPart` 方法来上传每一个分块,但是要注意以下几点: - -- BasicUploadPart 方法要求除最后一个Part以外,其他的Part大小都要大于等于5MB。但是该接口并不会立即校验上传Part的大小;只有当Complete Multipart Upload的时候才会校验。 -- 为了保证数据在网络传输过程中不出现错误,建议您在`BasicUploadPart`后,使用每个分块BOS返回的Content-MD5值分别验证已上传分块数据的正确性。当所有分块数据合成一个Object后,不再含MD5值。 -- Part号码的范围是1~10000。如果超出这个范围,BOS将返回InvalidArgument的错误码。 -- 每次上传Part之后,BOS的返回结果会包含一个 `PartETag`对象,它是上传块的ETag与块编号(PartNumber)的组合,在后续完成分块上传的步骤中会用到它,因此需要将其保存起来。一般来讲这些`PartETag` 对象将被保存到List中。 - -**完成分块上传** - -如下代码所示,完成分块上传: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -completeArgs := &api.CompleteMultipartUploadArgs{partEtags} -res, _ := bosClient.CompleteMultipartUploadFromStruct( - bucketName, objectKey, uploadId, completeArgs, nil) - -// 输出结果对象的内容 -fmt.Println(res.Location) -fmt.Println(res.Bucket) -fmt.Println(res.Key) -fmt.Println(res.ETag) -``` - -上面代码中的 `partETags`是第二部中保存的partETag的列表,BOS收到用户提交的Part列表后,会逐一验证每个数据Part的有效性。当所有的数据Part验证通过后,BOS将把这些数据part组合成一个完整的Object。 - -**取消分块上传** - -用户可以使用abortMultipartUpload方法取消分块上传。 - -```go -bosClient.AbortMultipartUpload(bucketName, objectKey, uploadId) -``` - -**获取未完成的分块上传** - -用户可以使用 `ListMultipartUploads` 方法获取Bucket内未完成的分块上传事件。 - -```go -// 列出给定bucket下所有未完成的分块信息 -res, err := BasicListMultipartUploads(bucketName) - -// 输出返回结果状态信息 -fmt.Println(res.Bucket) -fmt.Println(res.Delimiter) -fmt.Println(res.Prefix) -fmt.Println(res.IsTruncated) -fmt.Println(res.KeyMarker) -fmt.Println(res.NextKeyMarker) -fmt.Println(res.MaxUploads) - -// 遍历所有未完成分块信息列表 -for _, multipartUpload := range res.Uploads { - fmt.Println("Key:", multipartUpload.Key, ", UploadId:", multipartUpload.UploadId) -} -``` - -> **注意:** -> 1. 默认情况下,如果Bucket中的分块上传事件的数目大于1000,则只会返回1000个Object,并且返回结果中IsTruncated的值为True,同时返回NextKeyMarker作为下次读取的起点。 -> 2. 若想返回更多分块上传事件的数目,可以使用KeyMarker参数分次读取。 - -**获取所有已上传的块信息** - -用户可以使用 `ListParts` 方法获取某个上传事件中所有已上传的块。 - -```go -// 使用基本接口列出当前上传成功的分块 -res, err := bosClient.BasicListParts(bucketName, objectKey, uploadId) - -// 使用原始接口提供参数,列出当前上传成功的最多100个分块 -args := new(api.ListPartsArgs) -args.MaxParts = 100 -res, err := bosClient.ListParts(bucketName, objectKey, uploadId, args) - -// 打印返回的状态结果 -fmt.Println(res.Bucket) -fmt.Println(res.Key) -fmt.Println(res.UploadId) -fmt.Println(res.Initiated) -fmt.Println(res.StorageClass) -fmt.Println(res.PartNumberMarker) -fmt.Println(res.NextPartNumberMarker) -fmt.Println(res.MaxParts) -fmt.Println(res.IsTruncated) - -// 打印分块信息 -for _, part := range res.Parts { - fmt.Println("PartNumber:", part.PartNumber, ", Size:", part.Size, - ", ETag:", part.ETag, ", LastModified:", part.LastModified) -} -``` - -> **注意:** -> 1. 默认情况下,如果Bucket中的分块上传事件的数目大于1000,则只会返回1000个Object,并且返回结果中IsTruncated的值为True,同时返回NextPartNumberMarker作为下次读取的起点。 -> 2. 
若想返回更多分块上传事件的数目,可以使用PartNumberMarker参数分次读取。 - -上述示例是使用API依次实现,没有并发执行,如果需要加快速度需要用户实现并发上传的部分。为了方便用户使用,BOS Client特封装了分块上传的并发接口`UploadSuperFile`: - -- 接口:`UploadSuperFile(bucket, object, fileName, storageClass string) error` -- 参数: - - bucket: 上传对象的bucket的名称 - - object: 上传对象的名称 - - fileName: 本地文件名称 - - storageClass: 上传对象的存储类型,默认标准存储 -- 返回值: - - error: 上传过程中的错误,成功则为空 - -用户只需给出`bucket`、`object`、`filename`即可并发的进行分块上传,同时也可指定上传对象的`storageClass`。 - -## 下载文件 - -BOS GO SDK提供了丰富的文件下载接口,用户可以通过以下方式从BOS中下载文件: - -- 简单流式下载 -- 下载到本地文件 -- 范围下载 - -### 简单流式下载 - -用户可以通过如下代码将Object读取到一个流中: - -```go -// 提供Bucket和Object,直接获取一个对象 -res, err := bosClient.BasicGetObject(bucketName, objectName) - -// 获取ObjectMeta -meta := res.ObjectMeta - -// 获取Object的读取流(io.ReadCloser) -stream := res.Body - -// 确保关闭Object读取流 -defer stream.Close() - -// 调用stream对象的Read方法处理Object -... -``` - -> **注意:** -> 1. 上述接口的返回结果对象中包含了Object的各种信息,包含Object所在的Bucket、Object的名称、MetaData以及一个读取流。 -> 2. 可通过结果对象的ObjectMeta字段获取对象的元数据,它包含了Object上传时定义的ETag,Http Header以及自定义的元数据。 -> 3. 可通过结果对象的Body字段获取返回Object的读取流,通过操作读取流将Object的内容读取到文件或者内存中或进行其他操作。 - -### 下载到本地文件 - -用户可以通过如下代码直接将Object下载到指定文件: - -```go -err := bosClient.BasicGetObjectToFile(bucketName, objectName, "path-to-local-file") -``` - -### 范围下载 - -为了实现更多的功能,可以指定下载范围、返回header来实现更精细化地获取Object。如果指定的下载范围是0 - 100,则返回第0到第100个字节的数据,包括第100个,共101字节的数据,即[0, 100]。 - -```go -// 指定范围起始位置和返回header -responseHeaders := map[string]string{"ContentType": "image/gif"} -rangeStart = 1024 -rangeEnd = 2048 -res, err := bosClient.GetObject(bucketName, objectName, responseHeaders, rangeStart, rangeEnd) - -// 只指定起始位置start -res, err := bosClient.GetObject(bucketName, objectName, responseHeaders, rangeStart) - -// 不指定range -res, err := bosClient.GetObject(bucketName, objectName, responseHeaders) - -// 不指定返回可选头部 -res, err := bosClient.GetObject(bucketName, objectName, nil) -``` - -基于范围下载接口,用户可以据此实现文件的分段下载和断点续传。为了方便用户使用,BOS GO SDK封装了并发下载的接口`DownloadSuperFile`: - -- 接口:`DownloadSuperFile(bucket, object, fileName string) error` -- 参数: - - bucket: 下载对象所在bucket的名称 - - object: 下载对象的名称 - - fileName: 该对象保存到本地的文件名称 -- 返回值: - - error: 下载过程中的错误,成功则为空 - -该接口利用并发控制参数执行并发范围下载,直接下载到用户指定的文件中。 - -### 其他使用方法 - -**获取Object的存储类型** - -Object的storage class属性分为`STANDARD`(标准存储)、`STANDARD_IA`(低频存储)和`COLD`(冷存储),通过如下代码可以实现: - -```go -res, err := bosClient.GetObjectMeta(bucketName, objectName) -fmt.Println(res.StorageClass) -``` - -**只获取Object Metadata** - -通过GetObjectMeta方法可以只获取Object Metadata而不获取Object的实体。如下代码所示: - -```go -res, err := bosClient.GetObjectMeta(bucketName, objectName) -fmt.Printf("Metadata: %+v\n", res) -``` - -## 获取文件下载URL - -用户可以通过如下代码获取指定Object的URL: - -```go -// 1. 原始接口,可设置bucket、object名称,过期时间、请求方法、请求头和请求参数 -url := bosClient.GeneratePresignedUrl(bucketName, objectName, - expirationInSeconds, method, headers, params) - -// 2. 
基本接口,默认为`GET`方法,仅需设置过期时间 -url := bosClient.BasicGeneratePresignedUrl(bucketName, objectName, expirationInSeconds) -``` - -> **说明:** -> -> * 用户在调用该函数前,需要手动设置endpoint为所属区域域名。百度云目前开放了多区域支持,请参考[区域选择说明](https://cloud.baidu.com/doc/Reference/Regions.html)。目前支持“华北-北京”、“华南-广州”和“华东-苏州”三个区域。北京区域:`http://bj.bcebos.com`,广州区域:`http://gz.bcebos.com`,苏州区域:`http://su.bcebos.com`。 -> * `expirationInSeconds`为指定的URL有效时长,时间从当前时间算起,为可选参数,不配置时系统默认值为1800秒。如果要设置为永久不失效的时间,可以将`expirationInSeconds`参数设置为-1,不可设置为其他负数。 -> * 如果预期获取的文件时公共可读的,则对应URL链接可通过简单规则快速拼接获取: http://{$bucketName}.{$region}.bcebos.com/{$objectName}。 - -## 列举存储空间中的文件 - -BOS GO SDK支持用户通过以下两种方式列举出object: - -- 简单列举 -- 通过参数复杂列举 - -除此之外,用户还可在列出文件的同时模拟文件夹。 - -### 简单列举 - -当用户希望简单快速列举出所需的文件时,可通过ListObjects方法返回ListObjectsResult对象,ListObjectsResult对象包含了此次请求的返回结果。用户可以从ListObjectsResult对象的Contents字段获取Object的所有描述信息。 - -```go -listObjectResult, err := bosClient.ListObjects(bucketName, nil) - -// 打印当前ListObjects请求的状态结果 -fmt.Println("Name:", listObjectResult.Name) -fmt.Println("Prefix:", listObjectResult.Prefix) -fmt.Println("Delimiter:", listObjectResult.Delimiter) -fmt.Println("Marker:", listObjectResult.Marker) -fmt.Println("NextMarker:", listObjectResult.NextMarker) -fmt.Println("MaxKeys:", listObjectResult.MaxKeys) -fmt.Println("IsTruncated:", listObjectResult.IsTruncated) - -// 打印Contents字段的具体结果 -for _, obj := range listObjectResult.Contents { - fmt.Println("Key:", obj.Key, ", ETag:", obj.ETag, ", Size:", obj.Size, - ", LastModified:", obj.LastModified, ", StorageClass:", obj.StorageClass) -} -``` - -> **注意:** -> 1. 默认情况下,如果Bucket中的Object数量大于1000,则只会返回1000个Object,并且返回结果中IsTruncated值为True,并返回NextMarker做为下次读取的起点。 -> 2. 若想增大返回Object的数目,可以使用Marker参数分次读取。 - -### 通过参数复杂列举 - -除上述简单列举外,用户还可通过设置ListObjectsArgs参数实现各种灵活的查询功能。ListObjectsArgs可设置的参数如下: - -参数 | 功能 ------|----- -Prefix | 限定返回的object key必须以prefix作为前缀 -Delimiter | 分隔符,是一个用于对Object名字进行分组的字符所有名字包含指定的前缀且第一次出现。Delimiter字符之间的Object作为一组元素 -Marker | 设定结果从marker之后按字母排序的第一个开始返回 -MaxKeys | 限定此次返回object的最大数,如果不设定,默认为1000,max-keys取值不能大于1000 - -> **注意:** -> 1. 如果有Object以Prefix命名,当仅使用Prefix查询时,返回的所有Key中仍会包含以Prefix命名的Object,详见[递归列出目录下所有文件](#递归列出目录下所有文件)。 -> 2. 
如果有Object以Prefix命名,当使用Prefix和Delimiter组合查询时,返回的所有Key中会有Null,Key的名字不包含Prefix前缀,详见[查看目录下的文件和子目录](#查看目录下的文件和子目录)。 - -下面我们分别以几个案例说明通过参数列举的方法: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.ListObjectsArgs) - -// 指定最大返回参数为500 -args.MaxKeys = 500 - -// 指定满足特定前缀 -args.Prefix = "my-prefix/" - -// 指定分隔符,实现类似文件夹的功能 -args.Delimiter = "/" - -// 设置特定Object之后的排序结果 -args.Marker = "bucket/object-0" - -listObjectResult, err := bosClient.ListObjects(bucketName, args) -``` - -### 模拟文件夹功能 - -在BOS的存储结果中是没有文件夹这个概念的,所有元素都是以Object来存储,但BOS的用户在使用数据时往往需要以文件夹来管理文件。因此,BOS提供了创建模拟文件夹的能力,其本质上来说是创建了一个size为0的Object。对于这个Object可以上传下载,只是控制台会对以“/”结尾的Object以文件夹的方式展示。 - -用户可以通过Delimiter和Prefix参数的配合模拟出文件夹功能。Delimiter和Prefix的组合效果是这样的: - -如果把Prefix设为某个文件夹名,就可以罗列以此Prefix开头的文件,即该文件夹下递归的所有的文件和子文件夹(目录)。文件名在Contents中显示。 -如果再把 Delimiter 设置为“/”时,返回值就只罗列该文件夹下的文件和子文件夹(目录),该文件夹下的子文件名(目录)返回在CommonPrefixes 部分,子文件夹下递归的文件和文件夹不被显示。 - -如下是几个应用方式: - -**列出Bucket内所有文件** - -当用户需要获取Bucket下的所有文件时,可以参考如下代码: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.ListObjectsArgs) -args.Delimiter = "/" -listObjectResult, err := bosClient.ListObjects(bucketName, args) -``` - -**递归列出目录下所有文件** - -可以通过设置 `Prefix` 参数来获取某个目录下所有的文件: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.ListObjectsArgs) -args.Prefix = "fun/" -listObjectResult, err := bosClient.ListObjects(bucketName, args) -fmt.Println("Objects:") -for _, obj := range listObjectResult.Contents { - fmt.Println(obj.Key) -} -``` - -输出: - - Objects: - fun/ - fun/movie/001.avi - fun/movie/007.avi - fun/test.jpg - -**查看目录下的文件和子目录** - -在 `Prefix` 和 `Delimiter` 结合的情况下,可以列出目录下的文件和子目录: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.ListObjectsArgs) -args.Delimiter = "/" -args.Prefix = "fun/" -listObjectResult, err := bosClient.ListObjects(bucketName, args) - -// 遍历所有的Objects(当前目录和直接子文件) -fmt.Println("Objects:") -for _, obj := range listObjectResult.Contents { - fmt.Println(obj.Key) -} - -// 遍历所有的CommonPrefix(子目录) -fmt.Println("CommonPrefixs:") -for _, obj := range listObjectResult.CommonPrefixes { - fmt.Println(obj.Prefix) -} -``` - -输出: - Objects: - fun/ - fun/test.jpg - - CommonPrefixs: - fun/movie/ - - -返回的结果中,`ObjectSummaries` 的列表中给出的是fun目录下的文件。而`CommonPrefixs`的列表中给出的是fun目录下的所有子文件夹。可以看出`fun/movie/001.avi` ,`fun/movie/007.avi`两个文件并没有被列出来,因为它们属于 `fun` 文件夹下的 `movie` 目录。 - -### 列举Bucket中object的存储属性 - -当用户完成上传后,如果需要查看指定Bucket中的全部Object的storage class属性,可以通过如下代码实现: - -```go -listObjectResult, err := bosClient.ListObjects(bucketName, args) -for _, obj := range listObjectResult.Contents { - fmt.Println("Key:", obj.Key) - fmt.Println("LastModified:", obj.LastModified) - fmt.Println("ETag:", obj.ETag) - fmt.Println("Size:", obj.Size) - fmt.Println("StorageClass:", obj.StorageClass) - fmt.Println("Owner:", obj.Owner.Id, obj.Owner.DisplayName) -} -``` - -## 删除文件 - -**删除单个文件** - -可参考如下代码删除了一个Object: - -```go -// 指定要删除Object名称和所在的Bucket名称 -err := bosClient.DeleteObject(bucketName, objectName) -``` - -**删除多个文件** - -用户也可通过一次调用删除同一个Bucket下的多个文件,有如下参数: - -参数名称 | 描述 | 父节点 ----------|---------|-------- -objects | 保存要删除的Object信息的容器,包含一个或多个Object元素 | - -+key | 要删除的Object的名称 | objects - -具体示例如下: - -``` -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -// 1. 原始接口,提供多个Object的List Stream -res, err := bosClient.DeleteMultipleObjects(bucket, objectListStream) - -// 2. 
提供json字符串删除 -objectList := `{ - "objects":[ - {"key": "aaa"}, - {"key": "bbb"} - ] -}` -res, err := bosClient.DeleteMultipleObjectsFromString(bucket, objectList) - -// 3. 提供删除Object的List对象 -deleteObjectList := make([]api.DeleteObjectArgs, 0) -deleteObjectList = append(deleteObjectList, api.DeleteObjectArgs{"aaa"}) -deleteObjectList = append(deleteObjectList, api.DeleteObjectArgs{"bbb"}) -multiDeleteObj := &api.DeleteMultipleObjectsArgs{deleteObjectList} -res, err := bosClient.DeleteMultipleObjectsFromStruct(bucket, multiDeleteObj) - -// 4. 直接提供待删除Object的名称列表 -deleteObjects := []string{"aaa", "bbb"} -res, err := bosClient.DeleteMultipleObjectsFromKeyList(bucket, deleteObjects) -``` - -> **说明:** -> -> 一次删除多个Object的时候,返回的结果里包含了未删除成功的Object名称列表。删除部分对象成功时`res`里包含了未删除成功的名称列表。 -> 删除部分对象成功时`err`为`nil`且`res`不为`nil`,判断全部删除成功:`err`为`io.EOF`且`res`为`nil`。 - -## 查看文件是否存在 - -用户可通过如下操作查看某文件是否存在: - -```go -// import "github.com/baidubce/bce-sdk-go/bce" - -_, err := bosClient.GetObjectMeta(bucketName, objectName) -if realErr, ok := err.(*bce.BceServiceError); ok { - if realErr.StatusCode == 404 { - fmt.Println("object not exists") - } -} -fmt.Println("object exists") -``` - -## 获取及更新文件元信息 - -文件元信息(Object Metadata),是对用户上传BOS的文件的属性描述,分为两种:HTTP标准属性(HTTP Headers)和User Meta(用户自定义元信息)。 - -### 获取文件元信息 - -用户通过GetObjectMeta方法可以只获取Object Metadata而不获取Object的实体。如下代码所示: - -```go -res, err := bosClient.GetObjectMeta(bucketName, objectName) -fmt.Printf("Metadata: %+v\n", res) -``` - -### 修改文件元信息 - -BOS修改Object的Metadata通过拷贝Object实现。即拷贝Object的时候,把目的Bucket设置为源Bucket,目的Object设置为源Object,并设置新的Metadata,通过拷贝自身实现修改Metadata的目的。如果不设置新的Metadata,则报错。这种方式下必须使用拷贝模式为“replace”(默认情况为“copy”)。示例如下: - -```go -// import "github.com/baidubce/bce-sdk-go/bce" - -args := new(api.CopyObjectArgs) - -// 必须设置拷贝模式为"replace",默认为"copy"是不能执行Metadata修改的 -args.MetadataDirective="replace" - -// 设置Metadata参数值,具体字段请参考官网说明 -args.LastModified = "Wed, 29 Nov 2017 13:18:08 GMT" -args.ContentType = "text/json" - -// 使用CopyObject接口修改Metadata,源对象和目的对象相同 -res, err := bosClient.CopyObject(bucket, object, bucket, object, args) -``` - -## 拷贝文件 - -### 拷贝一个文件 - -用户可以通过CopyObject方法拷贝一个Object,如下代码所示: - -```go -// 1. 原始接口,可设置拷贝参数 -res, err := bosClient.CopyObject(bucketName, objectName, srcBucket, srcObject, nil) - -// 2. 
忽略拷贝参数,使用默认 -res, err := bosClient.BasicCopyObject(bucketName, objectName, srcBucket, srcObject) - -fmt.Println("ETag:", res.ETag, "LastModified:", res.LastModified) -``` - -上述接口返回的结果对象中包含了新Object的ETag和修改时间LastModified。 - -### 设置拷贝参数拷贝Object - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.CopyObjectArgs) - -// 设置用户自定义Metadata -args.UserMeta = map[string]string{"": ""} - -res, err := bosClient.CopyObject(bucketName, objectName, srcBucket, srcObject, args) -fmt.Println("ETag:", res.ETag, "LastModified:", res.LastModified) -``` - -**设置Object的Copy属性** - -用户在执行拷贝的过程中,可以对源Object的Etag或修改状态进行判断,根据判断结果决定是否执行拷贝。详细的参数解释如下: - -| 名称 | 类型 | 描述 | 是否必需 | -| --- | --- | --- | ---- | -| x-bce-copy-source-if-match | String | 如果源Object的ETag值和用户提供的ETag相等,则执行拷贝操作,否则拷贝失败。 | 否 | -| x-bce-copy-source-if-none-match | String | 如果源Object的ETag和用户提供的ETag不相等,则执行拷贝操作,否则拷贝失败。 | 否 | -| x-bce-copy-source-if-unmodified-since | String | 如果源object在x-bce-copy-source-if-unmodified-since之后没被修改,则执行拷贝操作,否则拷贝失败。 | 否 | -| x-bce-copy-source-if-modified-since | String | 如果源object在x-bce-copy-source-if-modified-since之后被修改了,则执行拷贝操作,否则拷贝失败。 | 否 | - -对应的示例代码: - -```go -// import "github.com/baidubce/bce-sdk-go/services/bos/api" - -args := new(api.CopyObjectArgs) - -// 设置用户自定义Metadata -args.UserMeta = map[string]string{"": ""} - -// 设置copy-source-if-match -args.IfMatch = "111111111183bf192b57a4afc76fa632" - -// 设置copy-source-if-none-match -args.IfNoneMatch = "111111111183bf192b57a4afc76fa632" - -// 设置copy-source-if-modified-since -args.IfModifiedSince = "Fri, 16 Mar 2018 17:07:21 GMT" - -// 设置copy-source-if-unmodified-since -args.IfUnmodifiedSince = "Fri, 16 Mar 2018 17:07:21 GMT" - -res, err := bosClient.CopyObject(bucketName, objectName, srcBucket, srcObject, args) -fmt.Println("ETag:", res.ETag, "LastModified:", res.LastModified) -``` - -**同步Copy功能** - -当前BOS的CopyObject接口是通过同步方式实现的。同步方式下,BOS端会等待Copy实际完成才返回成功。同步Copy能帮助用户更准确的判断Copy状态,但用户感知的复制时间会变长,且复制时间和文件大小成正比。 - -同步Copy方式更符合业界常规,提升了与其它平台的兼容性。同步Copy方式还简化了BOS服务端的业务逻辑,提高了服务效率。 - - -# 数据处理及使用 - -## 生命周期管理 - -BOS支持用户对Bucket设置生命周期规则,以自动将过期的文件清除,节省存储空间。针对不同前缀的文件,用户可以同时设置多条规则。 -在为Bucket设置一条生命周期规则时,需注意如下参数的使用方式: - -规则项 | 描述 | 是否必填 | 备注 --------|--------|------------|-------- -id | 规则的标识符 | 必填 | 同一个bucket内规则id必须唯一,不能重复。如果用户不填系统会自动帮用户生成一个 -status | 规则的状态 | 必填 | 取值为enabled或disabled,当规则处于disabled时规则不生效 -resource | 规则对哪些资源生效 | 必填 | 举例:对samplebucket里以prefix/为前缀的Object生效:`samplebucket/prefix/*` -condition | 规则依赖的条件 | 必填 | 目前只支持time形式 -+time | 时间限制条件 | 必填 | 通过定义的dateGreaterThan实现 -++dateGreaterThan | 描述时间关系 | 必填 | 支持绝对时间date和相对时间days。绝对时间date格式为yyyy-mm-ddThh:mm:ssZ,eg. 2016-09-07T00:00:00Z。绝对时间为UTC时间,必须以00:00:00(UTC 0点)结尾;相对时间days的描述遵循ISO8601,支持的最小时间粒度为天,如:$(lastModified)+P7D表示的时间为object的last-modified之后7天。 -action | 对resource执行的操作动作 | 必填 | - -+name | 执行的操作名称 | 必填 | 取值为Transition、DeleteObject、AbortMultipartUpload -+storageClass | Object的存储类型 | 可选 | action为Transition时可以设定,取值为STANDARD_IA或COLD,表示从原存储类型转为低频存储或冷存储 - -### 设置生命周期规则 - -可通过如下代码设置一条生命周期规则: - -```go -// import "github.com/baidubce/bce-sdk-go/bce" - -ruleStr := `{ - "rule": [ - { - "id": "delete-rule-1", - "status": "enabled", - "resource": ["my-bucket/abc*"], - "condition": { - "time": { - "dateGreaterThan": "2018-01-01T00:00:00Z" - } - }, - "action": { - "name": "DeleteObject" - } - } - ] -}` - -// 1. 通过stream调用接口进行设置 -body, _ := bce.NewBodyFromString(ruleStr) -err := bosClient.PutBucketLifecycle(bucketName, body) - -// 2. 
直接传入字符串 -err := bosClient.PutBucketLifecycleFromString(bucketName, ruleStr) -``` - -### 查看生命周期规则 - -可通过如下代码查看Bucket内的生命周期规则: - -```go -res, err := bosClient.GetBucketLifecycle(bucketName) -fmt.Printf("%+v\n", res.Rule) -``` - -### 删除生命周期规则 - -可通过如下代码清空生命周期规则: - -```go -err := bosClient.DeleteBucketLifecycle(bucketName) -``` - -## 管理存储类型 - -每个Bucket会有自身的存储类型,如果该Bucket下的Object上传时未指定存储类型则会默认继承该Bucket的存储类型。 - -### 设置Bucket存储类型 - -Bucket默认的存储类型为标准模式,用户可以通过下面的代码进行设置: - -``` -storageClass := "STANDARD_IA" -err := bosClient.PutBucketStorageclass(bucketName, storageClass) -``` - -### 获取Bucket存储类型 - -下面的代码可以查看一个Bucket的默认存储类型: - -``` -storageClass, err := bosClient.GetBucketStorageclass(bucketName) -``` - -## 设置访问日志 - -BOS GO SDK支持将用户访问Bucket时的请求记录记录为日志,用户可以指定访问Bucket的日志存放的位置。日志会包括请求者、Bucket名称、请求时间和请求操作等。关于Bucket日志的详细功能说明可参见[设置访问日志](https://cloud.baidu.com/doc/BOS/DevRef.html#.E6.97.A5.E5.BF.97.E6.A0.BC.E5.BC.8F)。 - -### 开启Bucket日志 - -用户通过设置用于放置日志的Bucket和日志文件前缀来开启Bucket日志功能。下面的示例代码可以设置访问日志的位置和前缀: - -``` -// import "github.com/baidubce/bce-sdk-go/bce" - -// 1. 从JSON字符串设置 -loggingStr := `{"targetBucket": "logging-bucket", "targetPrefix": "my-log/"}` -err := bosClient.PutBucketLoggingFromString(bucketName, loggingStr) - -// 2. 从参数对象设置 -args := new(api.PutBucketLoggingArgs) -args.TargetBucket = "logging-bucket" -args.TargetPrefix = "my-log/" -err := bosClient.PutBucketLoggingFromStruct(bucketName, args) - -// 3. 读取json格式的文件进行设置 -loggingStrem := bce.NewBodyFromFile("") -err := bosClient.PutBucketLogging(bucketName, loggingStream) -``` - -### 查看Bucket日志设置 - -下面的代码分别给出了如何获取给定Bucket的日志配置信息: - -```go -res, err := bosClient.GetBucketLogging(bucketName) -fmt.Println(res.Status) -fmt.Println(res.TargetBucket) -fmt.Println(res.TargetPrefix) -``` - -### 关闭Bucket日志 - -需要关闭Bucket的日志功能是,只需调用删除接口即可实现: - -```go -err := bosClient.DeleteBucketLogging(bucketName) -``` - - -# 错误处理 - -GO语言以error类型标识错误,BOS支持两种错误见下表: - -错误类型 | 说明 -----------------|------------------- -BceClientError | 用户操作产生的错误 -BceServiceError | BOS服务返回的错误 - -用户使用SDK调用BOS相关接口,除了返回所需的结果之外还会返回错误,用户可以获取相关错误进行处理。实例如下: - -``` -// bosClient 为已创建的BOS Client对象 -bucketLocation, err := bosClient.PutBucket("test-bucket") -if err != nil { - switch realErr := err.(type) { - case *bce.BceClientError: - fmt.Println("client occurs error:", realErr.Error()) - case *bce.BceServiceError: - fmt.Println("service occurs error:", realErr.Error()) - default: - fmt.Println("unknown error:", err) - } -} else { - fmt.Println("create bucket success, bucket location:", bucketLocation) -} -``` - -## 客户端异常 - -客户端异常表示客户端尝试向BOS发送请求以及数据传输时遇到的异常。例如,当发送请求时网络连接不可用时,则会返回BceClientError;当上传文件时发生IO异常时,也会抛出BceClientError。 - -## 服务端异常 - -当BOS服务端出现异常时,BOS服务端会返回给用户相应的错误信息,以便定位问题。常见服务端异常可参见[BOS错误信息格式](https://cloud.baidu.com/doc/BOS/API.html#.E9.94.99.E8.AF.AF.E4.BF.A1.E6.81.AF.E6.A0.BC.E5.BC.8F) - -## SDK日志 - -BOS GO SDK自行实现了支持六个级别、三种输出(标准输出、标准错误、文件)、基本格式设置的日志模块,导入路径为`github.com/baidubce/bce-sdk-go/util/log`。输出为文件时支持设置五种日志滚动方式(不滚动、按天、按小时、按分钟、按大小),此时还需设置输出日志文件的目录。详见示例代码。 - -### 默认日志 - -BOS GO SDK自身使用包级别的全局日志对象,该对象默认情况下不记录日志,如果需要输出SDK相关日志需要用户自定指定输出方式和级别,详见如下示例: - -``` -// import "github.com/baidubce/bce-sdk-go/util/log" - -// 指定输出到标准错误,输出INFO及以上级别 -log.SetLogHandler(log.STDERR) -log.SetLogLevel(log.INFO) - -// 指定输出到标准错误和文件,DEBUG及以上级别,以1GB文件大小进行滚动 -log.SetLogHandler(log.STDERR | log.FILE) -log.SetLogDir("/tmp/gosdk-log") -log.SetRotateType(log.ROTATE_SIZE) -log.SetRotateSize(1 << 30) - -// 输出到标准输出,仅输出级别和日志消息 -log.SetLogHandler(log.STDOUT) -log.SetLogFormat([]string{log.FMT_LEVEL, log.FMT_MSG}) -``` - -说明: 
- 1. 日志默认输出级别为`DEBUG` - 2. 如果设置为输出到文件,默认日志输出目录为`/tmp`,默认按小时滚动 - 3. 如果设置为输出到文件且按大小滚动,默认滚动大小为1GB - 4. 默认的日志输出格式为:`FMT_LEVEL, FMT_LTIME, FMT_LOCATION, FMT_MSG` - -### 项目使用 - -该日志模块无任何外部依赖,用户使用GO SDK开发项目,可以直接引用该日志模块自行在项目中使用,用户可以继续使用GO SDK使用的包级别的日志对象,也可创建新的日志对象,详见如下示例: - -``` -// 直接使用包级别全局日志对象(会和GO SDK自身日志一并输出) -log.SetLogHandler(log.STDERR) -log.Debugf("%s", "logging message using the log package in the BOS go sdk") - -// 创建新的日志对象(依据自定义设置输出日志,与GO SDK日志输出分离) -myLogger := log.NewLogger() -myLogger.SetLogHandler(log.FILE) -myLogger.SetLogDir("/home/log") -myLogger.SetRotateType(log.ROTATE_SIZE) -myLogger.Info("this is my own logger from the BOS go sdk") -``` - - -# 版本变更记录 - -## v0.9.2 [2018-3-16] - - - 修复go get下载时的错误提示信息 - - 修复重试请求时请求的body流丢失的问题 - - 完善UploadSuperFile返回的错误提示信息 - - 将GeneratePresignedUrl接口统一调整为bucket virtual hosting模式 - -## v0.9.1 [2018-1-4] - -首次发布: - - - 创建、查看、罗列、删除Bucket,获取位置和判断是否存在 - - 支持管理Bucket的生命周期、日志、ACL、存储类型 - - 上传、下载、删除、罗列Object,支持分块上传、分块拷贝 - - 提供AppendObject功能和FetchObject功能 - - 封装并发的下载和分块上传接口 diff --git a/vendor/github.com/baidubce/bce-sdk-go/init.go b/vendor/github.com/baidubce/bce-sdk-go/init.go deleted file mode 100644 index e4f63d0..0000000 --- a/vendor/github.com/baidubce/bce-sdk-go/init.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2017 Baidu, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - */ - -// init.go - just import the sub packages - -// Package sdk imports all sub packages to build all of them when calling `go install', `go build' -// or `go get' commands. -package sdk - -import ( - _ "github.com/baidubce/bce-sdk-go/auth" - _ "github.com/baidubce/bce-sdk-go/bce" - _ "github.com/baidubce/bce-sdk-go/http" - _ "github.com/baidubce/bce-sdk-go/services/bos" - _ "github.com/baidubce/bce-sdk-go/services/sts" - _ "github.com/baidubce/bce-sdk-go/util" - _ "github.com/baidubce/bce-sdk-go/util/log" -) diff --git a/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/denverdino/aliyungo/LICENSE.txt deleted file mode 100644 index 9182971..0000000 --- a/vendor/github.com/denverdino/aliyungo/LICENSE.txt +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2015 Li Yi (denverdino@gmail.com). - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go deleted file mode 100755 index 69a9c3d..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/client.go +++ /dev/null @@ -1,268 +0,0 @@ -package common - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "log" - "net/http" - "net/url" - "strings" - "time" - - "github.com/denverdino/aliyungo/util" -) - -// A Client represents a client of ECS services -type Client struct { - AccessKeyId string //Access Key Id - AccessKeySecret string //Access Key Secret - debug bool - httpClient *http.Client - endpoint string - version string - serviceCode string - regionID Region - businessInfo string -} - -// NewClient creates a new instance of ECS client -func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { - client.AccessKeyId = accessKeyId - client.AccessKeySecret = accessKeySecret + "&" - client.debug = false - client.httpClient = &http.Client{} - client.endpoint = endpoint - client.version = version -} - -func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) { - client.Init(endpoint, version, accessKeyId, accessKeySecret) - client.serviceCode = serviceCode - client.regionID = regionID - client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret) -} - -//NewClient using location service -func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret string) { - locationClient := NewLocationClient(accessKeyId, accessKeySecret) - ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode) - if ep == "" { - ep = loadEndpointFromFile(region, serviceCode) - } - - if ep != "" { - client.endpoint = ep - } -} - -// SetEndpoint sets custom endpoint -func (client *Client) SetEndpoint(endpoint string) { - client.endpoint = endpoint -} - -// SetEndpoint sets custom version -func (client *Client) SetVersion(version string) { - client.version = version -} - -func (client *Client) SetRegionID(regionID Region) { - client.regionID = regionID -} - -//SetServiceCode sets serviceCode -func (client *Client) SetServiceCode(serviceCode string) { - client.serviceCode = serviceCode -} - -// SetAccessKeyId sets new AccessKeyId -func (client *Client) SetAccessKeyId(id string) { - client.AccessKeyId = id -} - -// SetAccessKeySecret sets new AccessKeySecret -func (client *Client) SetAccessKeySecret(secret string) { - client.AccessKeySecret = secret + "&" -} - -// SetDebug sets debug mode to log the request/response message -func (client *Client) SetDebug(debug bool) { - client.debug = debug -} - -// SetBusinessInfo sets business info to log the request/response message -func (client *Client) SetBusinessInfo(businessInfo string) { - if strings.HasPrefix(businessInfo, "/") { - client.businessInfo = businessInfo - } else if businessInfo != "" { - client.businessInfo = "/" + businessInfo - } -} - -// Invoke sends the raw HTTP request for ECS services -func (client *Client) Invoke(action string, args interface{}, response interface{}) error { - - request := Request{} - request.init(client.version, action, client.AccessKeyId) - - query := util.ConvertToQueryValues(request) - util.SetQueryValues(args, &query) - - // Sign request - signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) - - // Generate the request URL - requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) - - httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) - - if err != nil { - return GetClientError(err) - } - - // TODO move to util and add build val flag - httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) - - t0 := time.Now() - httpResp, err := client.httpClient.Do(httpReq) - t1 := time.Now() - if err != nil { - return GetClientError(err) - } - statusCode := httpResp.StatusCode - - if client.debug { - log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) - } - - defer httpResp.Body.Close() - body, err := ioutil.ReadAll(httpResp.Body) - - if err != nil { - return GetClientError(err) - } - - if client.debug { - var prettyJSON bytes.Buffer - err = json.Indent(&prettyJSON, body, "", " ") - log.Println(string(prettyJSON.Bytes())) - } - - if statusCode >= 400 && statusCode <= 599 { - errorResponse := ErrorResponse{} - err = json.Unmarshal(body, &errorResponse) - ecsError := &Error{ - ErrorResponse: errorResponse, - StatusCode: statusCode, - } - return ecsError - } - - err = json.Unmarshal(body, response) - //log.Printf("%++v", response) - if err != nil { - return GetClientError(err) - } - - return nil -} - -// Invoke sends the raw HTTP request for ECS services -//改进了一下上面那个方法,可以使用各种Http方法 -//2017.1.30 增加了一个path参数,用来拓展访问的地址 -func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error { - - request := Request{} - request.init(client.version, action, client.AccessKeyId) - - data := util.ConvertToQueryValues(request) - util.SetQueryValues(args, &data) - - // Sign request - signature := util.CreateSignatureForRequest(method, &data, client.AccessKeySecret) - - data.Add("Signature", signature) - // Generate the request URL - var ( - httpReq *http.Request - err error - ) - if method == http.MethodGet { - requestURL := client.endpoint + path + "?" 
+ data.Encode() - //fmt.Println(requestURL) - httpReq, err = http.NewRequest(method, requestURL, nil) - } else { - //fmt.Println(client.endpoint + path) - httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode())) - httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") - } - - if err != nil { - return GetClientError(err) - } - - // TODO move to util and add build val flag - httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) - - t0 := time.Now() - httpResp, err := client.httpClient.Do(httpReq) - t1 := time.Now() - if err != nil { - return GetClientError(err) - } - statusCode := httpResp.StatusCode - - if client.debug { - log.Printf("Invoke %s %s %d (%v) %v", ECSRequestMethod, client.endpoint, statusCode, t1.Sub(t0), data.Encode()) - } - - defer httpResp.Body.Close() - body, err := ioutil.ReadAll(httpResp.Body) - - if err != nil { - return GetClientError(err) - } - - if client.debug { - var prettyJSON bytes.Buffer - err = json.Indent(&prettyJSON, body, "", " ") - log.Println(string(prettyJSON.Bytes())) - } - - if statusCode >= 400 && statusCode <= 599 { - errorResponse := ErrorResponse{} - err = json.Unmarshal(body, &errorResponse) - ecsError := &Error{ - ErrorResponse: errorResponse, - StatusCode: statusCode, - } - return ecsError - } - - err = json.Unmarshal(body, response) - //log.Printf("%++v", response) - if err != nil { - return GetClientError(err) - } - - return nil -} - -// GenerateClientToken generates the Client Token with random string -func (client *Client) GenerateClientToken() string { - return util.CreateRandomString() -} - -func GetClientErrorFromString(str string) error { - return &Error{ - ErrorResponse: ErrorResponse{ - Code: "AliyunGoClientFailure", - Message: str, - }, - StatusCode: -1, - } -} - -func GetClientError(err error) error { - return GetClientErrorFromString(err.Error()) -} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoint.go b/vendor/github.com/denverdino/aliyungo/common/endpoint.go deleted file mode 100644 index 16bcbf9..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/endpoint.go +++ /dev/null @@ -1,118 +0,0 @@ -package common - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "os" - "strings" -) - -const ( - // LocationDefaultEndpoint is the default API endpoint of Location services - locationDefaultEndpoint = "https://location.aliyuncs.com" - locationAPIVersion = "2015-06-12" - HTTP_PROTOCOL = "http" - HTTPS_PROTOCOL = "https" -) - -var ( - endpoints = make(map[Region]map[string]string) -) - -//init endpoints from file -func init() { - -} - -func NewLocationClient(accessKeyId, accessKeySecret string) *Client { - endpoint := os.Getenv("LOCATION_ENDPOINT") - if endpoint == "" { - endpoint = locationDefaultEndpoint - } - - client := &Client{} - client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret) - return client -} - -func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) { - response := &DescribeEndpointResponse{} - err := client.Invoke("DescribeEndpoint", args, response) - if err != nil { - return nil, err - } - return response, err -} - -func getProductRegionEndpoint(region Region, serviceCode string) string { - if sp, ok := endpoints[region]; ok { - if endpoint, ok := sp[serviceCode]; ok { - return endpoint - } - } - - return "" -} - -func setProductRegionEndpoint(region Region, serviceCode string, endpoint string) { - endpoints[region] = map[string]string{ - serviceCode: endpoint, - 
} -} - -func (client *Client) DescribeOpenAPIEndpoint(region Region, serviceCode string) string { - if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" { - return endpoint - } - - defaultProtocols := HTTP_PROTOCOL - - args := &DescribeEndpointArgs{ - Id: region, - ServiceCode: serviceCode, - Type: "openAPI", - } - - endpoint, err := client.DescribeEndpoint(args) - if err != nil || endpoint.Endpoint == "" { - return "" - } - - for _, protocol := range endpoint.Protocols.Protocols { - if strings.ToLower(protocol) == HTTPS_PROTOCOL { - defaultProtocols = HTTPS_PROTOCOL - break - } - } - - ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoint) - - setProductRegionEndpoint(region, serviceCode, ep) - return ep -} - -func loadEndpointFromFile(region Region, serviceCode string) string { - data, err := ioutil.ReadFile("./endpoints.xml") - if err != nil { - return "" - } - - var endpoints Endpoints - err = xml.Unmarshal(data, &endpoints) - if err != nil { - return "" - } - - for _, endpoint := range endpoints.Endpoint { - if endpoint.RegionIds.RegionId == string(region) { - for _, product := range endpoint.Products.Product { - if strings.ToLower(product.ProductName) == serviceCode { - return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName) - } - } - } - } - - return "" -} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml deleted file mode 100644 index 8e781ac..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml +++ /dev/null @@ -1,1351 +0,0 @@ - - - - jp-fudao-1 - - Ecsecs-cn-hangzhou.aliyuncs.com - - - - me-east-1 - - Rdsrds.me-east-1.aliyuncs.com - Ecsecs.me-east-1.aliyuncs.com - Vpcvpc.me-east-1.aliyuncs.com - Kmskms.me-east-1.aliyuncs.com - Cmsmetrics.cn-hangzhou.aliyuncs.com - Slbslb.me-east-1.aliyuncs.com - - - - us-east-1 - - CScs.aliyuncs.com - Pushcloudpush.aliyuncs.com - COScos.aliyuncs.com - Essess.aliyuncs.com - Ace-opsace-ops.cn-hangzhou.aliyuncs.com - Billingbilling.aliyuncs.com - Dqsdqs.aliyuncs.com - Ddsmongodb.aliyuncs.com - Emremr.aliyuncs.com - Smssms.aliyuncs.com - Jaqjaq.aliyuncs.com - HPChpc.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com - Locationlocation.aliyuncs.com - ChargingServicechargingservice.aliyuncs.com - Msgmsg-inner.aliyuncs.com - Commondrivercommon.driver.aliyuncs.com - R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com - Bssbss.aliyuncs.com - Workorderworkorder.aliyuncs.com - Ocsm-kvstore.aliyuncs.com - Yundunyundun-cn-hangzhou.aliyuncs.com - Ubsms-innerubsms-inner.aliyuncs.com - Dmdm.aliyuncs.com - Greengreen.aliyuncs.com - Riskrisk-cn-hangzhou.aliyuncs.com - oceanbaseoceanbase.aliyuncs.com - Mscmsc-inner.aliyuncs.com - Yundunhsmyundunhsm.aliyuncs.com - Iotiot.aliyuncs.com - jaqjaq.aliyuncs.com - Omsoms.aliyuncs.com - livelive.aliyuncs.com - Ecsecs-cn-hangzhou.aliyuncs.com - Ubsmsubsms.aliyuncs.com - Vpcvpc.aliyuncs.com - Alertalert.aliyuncs.com - Aceace.cn-hangzhou.aliyuncs.com - AMSams.aliyuncs.com - ROSros.aliyuncs.com - PTSpts.aliyuncs.com - Qualitycheckqualitycheck.aliyuncs.com - M-kvstorem-kvstore.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com - HighDDosyd-highddos-cn-hangzhou.aliyuncs.com - CmsSiteMonitorsitemonitor.aliyuncs.com - Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com - BatchComputebatchCompute.aliyuncs.com - CFcf.aliyuncs.com - Drdsdrds.aliyuncs.com - Acsacs.aliyun-inc.com - Httpdnshttpdns-api.aliyuncs.com - Location-innerlocation-inner.aliyuncs.com - Aasaas.aliyuncs.com - 
[endpoints.xml, continued: the remaining deleted region blocks (tail of us-east-1, ap-northeast-1, cn-hongkong, cn-qingdao, cn-shanghai, cn-shenzhen, cn-beijing, cn-hangzhou, us-west-1, ap-southeast-1/2, plus assorted -inner, -finance, -gov and antgroup zones), each listing Aliyun service codes (Ecs, Rds, Slb, Oss, Kms, Vpc, Cms, Sts, Ram, Cdn, Ons, Mts, ...) with their API endpoint domains. The XML markup was lost in extraction, leaving the ProductName and DomainName values fused, so the garbled raw entries are not reproduced here.]
Domaindomain.aliyuncs.com - Otsots-pop.aliyuncs.com - Cdncdn.aliyuncs.com - Ramram.aliyuncs.com - Salessales.cn-hangzhou.aliyuncs.com - Rdsrds.aliyuncs.com - OssAdminoss-admin.aliyuncs.com - Onsons.aliyuncs.com - Ossoss-ap-southeast-1.aliyuncs.com - - - - cn-shenzhen-st4-d01 - - Ecsecs-cn-hangzhou.aliyuncs.com - - - - eu-central-1 - - Rdsrds.eu-central-1.aliyuncs.com - Ecsecs.eu-central-1.aliyuncs.com - Vpcvpc.eu-central-1.aliyuncs.com - Kmskms.eu-central-1.aliyuncs.com - Cmsmetrics.cn-hangzhou.aliyuncs.com - Slbslb.eu-central-1.aliyuncs.com - - - \ No newline at end of file diff --git a/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/denverdino/aliyungo/common/regions.go deleted file mode 100644 index 62e6e9d..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/regions.go +++ /dev/null @@ -1,34 +0,0 @@ -package common - -// Region represents ECS region -type Region string - -// Constants of region definition -const ( - Hangzhou = Region("cn-hangzhou") - Qingdao = Region("cn-qingdao") - Beijing = Region("cn-beijing") - Hongkong = Region("cn-hongkong") - Shenzhen = Region("cn-shenzhen") - Shanghai = Region("cn-shanghai") - Zhangjiakou = Region("cn-zhangjiakou") - - APSouthEast1 = Region("ap-southeast-1") - APNorthEast1 = Region("ap-northeast-1") - APSouthEast2 = Region("ap-southeast-2") - - USWest1 = Region("us-west-1") - USEast1 = Region("us-east-1") - - MEEast1 = Region("me-east-1") - - EUCentral1 = Region("eu-central-1") -) - -var ValidRegions = []Region{ - Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, - USWest1, USEast1, - APNorthEast1, APSouthEast1, APSouthEast2, - MEEast1, - EUCentral1, -} diff --git a/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/denverdino/aliyungo/common/request.go deleted file mode 100644 index 2a883f1..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/request.go +++ /dev/null @@ -1,101 +0,0 @@ -package common - -import ( - "fmt" - "log" - "time" - - "github.com/denverdino/aliyungo/util" -) - -// Constants for Aliyun API requests -const ( - SignatureVersion = "1.0" - SignatureMethod = "HMAC-SHA1" - JSONResponseFormat = "JSON" - XMLResponseFormat = "XML" - ECSRequestMethod = "GET" -) - -type Request struct { - Format string - Version string - AccessKeyId string - Signature string - SignatureMethod string - Timestamp util.ISO6801Time - SignatureVersion string - SignatureNonce string - ResourceOwnerAccount string - Action string -} - -func (request *Request) init(version string, action string, AccessKeyId string) { - request.Format = JSONResponseFormat - request.Timestamp = util.NewISO6801Time(time.Now().UTC()) - request.Version = version - request.SignatureVersion = SignatureVersion - request.SignatureMethod = SignatureMethod - request.SignatureNonce = util.CreateRandomString() - request.Action = action - request.AccessKeyId = AccessKeyId -} - -type Response struct { - RequestId string -} - -type ErrorResponse struct { - Response - HostId string - Code string - Message string -} - -// An Error represents a custom error for Aliyun API failure response -type Error struct { - ErrorResponse - StatusCode int //Status Code of HTTP Response -} - -func (e *Error) Error() string { - return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) -} - -type Pagination struct { - PageNumber int - PageSize int -} - -func (p *Pagination) SetPageSize(size int) { - p.PageSize = size -} - -func (p *Pagination) 
Validate() { - if p.PageNumber < 0 { - log.Printf("Invalid PageNumber: %d", p.PageNumber) - p.PageNumber = 1 - } - if p.PageSize < 0 { - log.Printf("Invalid PageSize: %d", p.PageSize) - p.PageSize = 10 - } else if p.PageSize > 50 { - log.Printf("Invalid PageSize: %d", p.PageSize) - p.PageSize = 50 - } -} - -// A PaginationResponse represents a response with pagination information -type PaginationResult struct { - TotalCount int - PageNumber int - PageSize int -} - -// NextPage gets the next page of the result set -func (r *PaginationResult) NextPage() *Pagination { - if r.PageNumber*r.PageSize >= r.TotalCount { - return nil - } - return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize} -} diff --git a/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/denverdino/aliyungo/common/types.go deleted file mode 100644 index a74e150..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/types.go +++ /dev/null @@ -1,89 +0,0 @@ -package common - -type InternetChargeType string - -const ( - PayByBandwidth = InternetChargeType("PayByBandwidth") - PayByTraffic = InternetChargeType("PayByTraffic") -) - -type InstanceChargeType string - -const ( - PrePaid = InstanceChargeType("PrePaid") - PostPaid = InstanceChargeType("PostPaid") -) - -type DescribeEndpointArgs struct { - Id Region - ServiceCode string - Type string -} - -type EndpointItem struct { - Protocols struct { - Protocols []string - } - Type string - Namespace string - Id Region - SerivceCode string - Endpoint string -} - -type DescribeEndpointResponse struct { - Response - EndpointItem -} - -type NetType string - -const ( - Internet = NetType("Internet") - Intranet = NetType("Intranet") -) - -type TimeType string - -const ( - Hour = TimeType("Hour") - Day = TimeType("Day") - Month = TimeType("Month") - Year = TimeType("Year") -) - -type NetworkType string - -const ( - Classic = NetworkType("Classic") - VPC = NetworkType("VPC") -) - -type BusinessInfo struct { - Pack string `json:"pack,omitempty"` - ActivityId string `json:"activityId,omitempty"` -} - -//xml -type Endpoints struct { - Endpoint []Endpoint `xml:"Endpoint"` -} - -type Endpoint struct { - Name string `xml:"name,attr"` - RegionIds RegionIds `xml:"RegionIds"` - Products Products `xml:"Products"` -} - -type RegionIds struct { - RegionId string `xml:"RegionId"` -} - -type Products struct { - Product []Product `xml:"Product"` -} - -type Product struct { - ProductName string `xml:"ProductName"` - DomainName string `xml:"DomainName"` -} diff --git a/vendor/github.com/denverdino/aliyungo/common/version.go b/vendor/github.com/denverdino/aliyungo/common/version.go deleted file mode 100644 index 7cb3d3a..0000000 --- a/vendor/github.com/denverdino/aliyungo/common/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package common - -const Version = "0.1" diff --git a/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go b/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go deleted file mode 100644 index 658e631..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go +++ /dev/null @@ -1,92 +0,0 @@ -package oss - -import ( - "encoding/base64" - "regexp" - "errors" - "strings" - "sync" - "net/http" - "io/ioutil" - "crypto/x509" - "crypto/rsa" - "crypto" - "fmt" - "encoding/pem" - "crypto/md5" -) - -type authenticationType struct { - lock *sync.RWMutex - certificate map[string]*rsa.PublicKey -} - -var ( - authentication = authenticationType{lock:&sync.RWMutex{}, certificate: map[string]*rsa.PublicKey{}} - urlReg = 
regexp.MustCompile(`^http(|s)://gosspublic.alicdn.com/[0-9a-zA-Z]`) -) -//验证OSS向业务服务器发来的回调函数。 -//该方法是并发安全的 -//pubKeyUrl 回调请求头中[x-oss-pub-key-url]一项,以Base64编码 -//reqUrl oss所发来请求的url,由path+query组成 -//reqBody oss所发来请求的body -//authorization authorization为回调头中的签名 -func AuthenticateCallBack(pubKeyUrl, reqUrl, reqBody, authorization string) error { - //获取证书url - keyURL, err := base64.URLEncoding.DecodeString(pubKeyUrl) - if err != nil { - return err - } - url := string(keyURL) - //判断证书是否来自于阿里云 - if !urlReg.Match(keyURL) { - return errors.New("证书地址有误。") - } - //获取文件名 - rs := []rune(url) - filename := string(rs[strings.LastIndex(url, "/"): len(rs) - 1]) - authentication.lock.RLock() - certificate := authentication.certificate[filename] - authentication.lock.RUnlock() - fmt.Println("准备证书") - //内存中没有证书,下载 - if certificate == nil { - authentication.lock.Lock() - res, err := http.Get(url) - if err != nil { - return err - } - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return err - } - fmt.Println(string(body)) - block, _ := pem.Decode(body) - if block == nil { - return errors.New("证书有误。") - } - pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return err - } - certificate = pubKey.(*rsa.PublicKey) - authentication.certificate[filename] = certificate - authentication.lock.Unlock() - } - //证书准备完毕,开始验证 - fmt.Println("准备开始验证") - //解析签名 - signature, err := base64.StdEncoding.DecodeString(authorization) - if err != nil { - return err - } - hashed := md5.New() - hashed.Write([]byte(reqUrl + "\n" + reqBody)) - fmt.Println(reqUrl + "\n" + reqBody) - if err := rsa.VerifyPKCS1v15(certificate, crypto.MD5, hashed.Sum(nil), signature); err != nil { - return err - } - //验证通过 - return nil -} diff --git a/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/denverdino/aliyungo/oss/client.go deleted file mode 100644 index c5e13e5..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/client.go +++ /dev/null @@ -1,1394 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/util" -) - -const DefaultContentType = "application/octet-stream" - -// The Client type encapsulates operations with an OSS region. -type Client struct { - AccessKeyId string - AccessKeySecret string - SecurityToken string - Region Region - Internal bool - Secure bool - ConnectTimeout time.Duration - - endpoint string - debug bool -} - -// The Bucket type encapsulates operations with an bucket. -type Bucket struct { - *Client - Name string -} - -// The Owner type represents the owner of the object in an bucket. 
-type Owner struct { - ID string - DisplayName string -} - -// Options struct -// -type Options struct { - ServerSideEncryption bool - Meta map[string][]string - ContentEncoding string - CacheControl string - ContentMD5 string - ContentDisposition string - //Range string - //Expires int -} - -type CopyOptions struct { - Headers http.Header - CopySourceOptions string - MetadataDirective string - //ContentType string -} - -// CopyObjectResult is the output from a Copy request -type CopyObjectResult struct { - ETag string - LastModified string -} - -var attempts = util.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// NewOSSClient creates a new OSS. - -func NewOSSClientForAssumeRole(region Region, internal bool, accessKeyId string, accessKeySecret string, securityToken string, secure bool) *Client { - return &Client{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - SecurityToken: securityToken, - Region: region, - Internal: internal, - debug: false, - Secure: secure, - } -} - -func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { - return &Client{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - Region: region, - Internal: internal, - debug: false, - Secure: secure, - } -} - -// SetDebug sets debug mode to log the request/response message -func (client *Client) SetDebug(debug bool) { - client.debug = debug -} - -// Bucket returns a Bucket with the given name. -func (client *Client) Bucket(name string) *Bucket { - name = strings.ToLower(name) - return &Bucket{ - Client: client, - Name: name, - } -} - -type BucketInfo struct { - Name string - CreationDate string - ExtranetEndpoint string - IntranetEndpoint string - Location string - Grant string `xml:"AccessControlList>Grant"` -} - -type GetServiceResp struct { - Owner Owner - Buckets []BucketInfo `xml:">Bucket"` -} - -type GetBucketInfoResp struct { - Bucket BucketInfo -} - -// GetService gets a list of all buckets owned by an account. -func (client *Client) GetService() (*GetServiceResp, error) { - bucket := client.Bucket("") - - r, err := bucket.Get("") - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp GetServiceResp - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -var createBucketConfiguration = ` - %s -` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. -func (client *Client) locationConstraint() io.Reader { - constraint := fmt.Sprintf(createBucketConfiguration, client.Region) - return strings.NewReader(constraint) -} - -// override default endpoint -func (client *Client) SetEndpoint(endpoint string) { - // TODO check endpoint - client.endpoint = endpoint -} - -// Info query basic information about the bucket -// -// You can read doc at https://help.aliyun.com/document_detail/31968.html -func (b *Bucket) Info() (BucketInfo, error) { - params := make(url.Values) - params.Set("bucketInfo", "") - r, err := b.GetWithParams("/", params) - - if err != nil { - return BucketInfo{}, err - } - - // Parse the XML response. 
- var resp GetBucketInfoResp - if err = xml.Unmarshal(r, &resp); err != nil { - return BucketInfo{}, err - } - - return resp.Bucket, nil -} - -// PutBucket creates a new bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket -func (b *Bucket) PutBucket(perm ACL) error { - headers := make(http.Header) - if perm != "" { - headers.Set("x-oss-acl", string(perm)) - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.Client.locationConstraint(), - } - return b.Client.query(req, nil) -} - -// DelBucket removes an existing bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&DeleteBucket -func (b *Bucket) DelBucket() (err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - - err = b.Client.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from an bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&GetObject -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from an bucket, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from an bucket, -// returning the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { - return b.GetResponseWithHeaders(path, make(http.Header)) -} - -// GetResponseWithHeaders retrieves an object from an bucket -// Accepts custom headers to be sent as the second parameter -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Get retrieves an object from an bucket. 
-func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) { - resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - return data, err -} - -func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - bucket: b.Name, - path: path, - params: params, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Exists checks whether or not an object exists on an bucket using a HEAD request. -func (b *Bucket) Exists(path string) (exists bool, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.Client.prepare(req) - if err != nil { - return - } - - resp, err := b.Client.run(req, nil) - - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - // We can treat a 403 or 404 as non existance - if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { - return false, nil - } - return false, err - } - - if resp.StatusCode/100 == 2 { - exists = true - } - if resp.Body != nil { - resp.Body.Close() - } - return exists, err - } - return false, fmt.Errorf("OSS Currently Unreachable") -} - -// Head HEADs an object in the bucket, returns the response with -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&HeadObject -func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return resp, err - } - return nil, fmt.Errorf("OSS Currently Unreachable") -} - -// Put inserts an object into the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&PutObject -func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&CopyObject -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := make(http.Header) - - headers.Set("x-oss-acl", string(perm)) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - timeout: 5 * time.Minute, - } - resp := &CopyObjectResult{} - err := b.Client.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the bucket by consuming data -// from r until EOF. 
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.Client.query(req, nil) -} - -// PutFile creates/updates object with file -func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error { - var contentType string - if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 { - contentType = DefaultContentType - } else { - if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" { - contentType = DefaultContentType - } else { - contentType = mimeType - } - } - stats, err := file.Stat() - if err != nil { - log.Printf("Unable to read file %s stats.\n", file.Name()) - return err - } - - return b.PutReader(path, file, stats.Size(), contentType, perm, options) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers http.Header) { - if o.ServerSideEncryption { - headers.Set("x-oss-server-side-encryption", "AES256") - } - if len(o.ContentEncoding) != 0 { - headers.Set("Content-Encoding", o.ContentEncoding) - } - if len(o.CacheControl) != 0 { - headers.Set("Cache-Control", o.CacheControl) - } - if len(o.ContentMD5) != 0 { - headers.Set("Content-MD5", o.ContentMD5) - } - if len(o.ContentDisposition) != 0 { - headers.Set("Content-Disposition", o.ContentDisposition) - } - - for k, v := range o.Meta { - for _, mv := range v { - headers.Add("x-oss-meta-"+k, mv) - } - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers http.Header) { - if len(o.MetadataDirective) != 0 { - headers.Set("x-oss-metadata-directive", o.MetadataDirective) - } - if len(o.CopySourceOptions) != 0 { - headers.Set("x-oss-copy-source-range", o.CopySourceOptions) - } - if o.Headers != nil { - for k, v := range o.Headers { - newSlice := make([]string, len(v)) - copy(newSlice, v) - headers[k] = newSlice - } - } -} - -func makeXMLBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. 
-// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucketWebsite -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.Client.query(req, nil) -} - -// Del removes an object from the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteObject -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.Client.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteMultipleObjects -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(int64(size), 10)) - headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil))) - headers.Set("Content-Type", "text/xml") - - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.Client.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in an bucket. -type Key struct { - Key string - LastModified string - Type string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. 
OSS lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. -// -// The max parameter specifies how many keys + common prefixes to return in -// the response, at most 1000. The default is 100. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucket -func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := make(url.Values) - params.Set("prefix", prefix) - params.Set("delimiter", delim) - params.Set("marker", marker) - if max != 0 { - params.Set("max-keys", strconv.FormatInt(int64(max), 10)) - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - bucket: b.Name, - params: params, - } - err = b.Client.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // if NextMarker is not returned, it should be set to the name of last key, - // so let's do it so that each caller doesn't have to - if result.IsTruncated && result.NextMarker == "" { - n := len(result.Contents) - if n > 0 { - result.NextMarker = result.Contents[n-1].Key - } - } - return result, nil -} - -type GetLocationResp struct { - Location string `xml:",innerxml"` -} - -func (b *Bucket) Location() (string, error) { - params := make(url.Values) - params.Set("location", "") - r, err := b.GetWithParams("/", params) - - if err != nil { - return "", err - } - - // Parse the XML response. - var resp GetLocationResp - if err = xml.Unmarshal(r, &resp); err != nil { - return "", err - } - - if resp.Location == "" { - return string(Hangzhou), nil - } - return resp.Location, nil -} - -func (b *Bucket) Path(path string) string { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return "/" + b.Name + path -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - req := &request{ - bucket: b.Name, - path: path, - } - err := b.Client.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - u.RawQuery = "" - return u.String() -} - -// SignedURL returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURL(path string, expires time.Time) string { - return b.SignedURLWithArgs(path, expires, nil, nil) -} - -// SignedURLWithArgs returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. 
-func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { - return b.SignedURLWithMethod("GET", path, expires, params, headers) -} - -// SignedURLWithMethod returns a signed URL that allows anyone holding the URL -// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. -func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { - var uv = url.Values{} - - if params != nil { - uv = params - } - - uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) - uv.Set("OSSAccessKeyId", b.AccessKeyId) - - req := &request{ - method: method, - bucket: b.Name, - path: path, - params: uv, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - - return u.String() -} - -// UploadSignedURL returns a signed URL that allows anyone holding the URL -// to upload the object at path. The signature is valid until expires. -// contenttype is a string like image/png -// name is the resource name in OSS terminology like images/ali.png [obviously excluding the bucket name itself] -func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string { - //TODO TESTING - expireDate := expires.Unix() - if method != "POST" { - method = "PUT" - } - - tokenData := "" - - stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) - secretKey := b.AccessKeySecret - accessId := b.AccessKeyId - mac := hmac.New(sha1.New, []byte(secretKey)) - mac.Write([]byte(stringToSign)) - macsum := mac.Sum(nil) - signature := base64.StdEncoding.EncodeToString(macsum) - signature = strings.TrimSpace(signature) - - signedurl, err := url.Parse(b.Region.GetEndpoint(b.Internal, b.Name, b.Secure)) - if err != nil { - log.Println("ERROR sining url for OSS upload", err) - return "" - } - signedurl.Path = name - params := url.Values{} - params.Add("OSSAccessKeyId", accessId) - params.Add("Expires", strconv.FormatInt(expireDate, 10)) - params.Add("Signature", signature) - - signedurl.RawQuery = params.Encode() - return signedurl.String() -} - -// PostFormArgsEx returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -// Additional conditions can be specified with conds -func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { - conditions := []string{} - fields = map[string]string{ - "AWSAccessKeyId": b.AccessKeyId, - "key": path, - } - - if conds != nil { - conditions = append(conditions, conds...) 
- } - - conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) - conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) - if redirect != "" { - conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) - fields["success_action_redirect"] = redirect - } - - vExpiration := expires.Format("2006-01-02T15:04:05Z") - vConditions := strings.Join(conditions, ",") - policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) - policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) - fields["policy"] = policy64 - - signer := hmac.New(sha1.New, []byte(b.AccessKeySecret)) - signer.Write([]byte(policy64)) - fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) - - action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name) - return -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { - return b.PostFormArgsEx(path, expires, redirect, nil) -} - -type request struct { - method string - bucket string - path string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool - timeout time.Duration -} - -func (req *request) url() (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err) - } - u.RawQuery = req.params.Encode() - u.Path = req.path - return u, nil -} - -// query prepares and runs the req request. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (client *Client) query(req *request, resp interface{}) error { - err := client.prepare(req) - if err != nil { - return err - } - r, err := client.run(req, resp) - if r != nil && r.Body != nil { - r.Body.Close() - } - return err -} - -// Sets baseurl on req from bucket name and the region endpoint -func (client *Client) setBaseURL(req *request) error { - - if client.endpoint == "" { - req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) - } else { - req.baseurl = fmt.Sprintf("%s://%s", getProtocol(client.Secure), client.endpoint) - } - - return nil -} - -// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls. -// -// Some commands including: -// GET Bucket acl http://goo.gl/aoXflF -// GET Bucket cors http://goo.gl/UlmBdx -// GET Bucket lifecycle http://goo.gl/8Fme7M -// GET Bucket policy http://goo.gl/ClXIo3 -// GET Bucket location http://goo.gl/5lh8RD -// GET Bucket Logging http://goo.gl/sZ5ckF -// GET Bucket notification http://goo.gl/qSSZKD -// GET Bucket tagging http://goo.gl/QRvxnM -// require the first character after the bucket name in the path to be a literal '?' and -// not the escaped hex representation '%3F'. -func partiallyEscapedPath(path string) string { - pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") - if len(pathEscapedAndSplit) >= 3 { - if len(pathEscapedAndSplit[2]) >= 3 { - // Check for the one "?" that should not be escaped. - if pathEscapedAndSplit[2][0:3] == "%3F" { - pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] - } - } - } - return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) -} - -// prepare sets up req to be delivered to OSS. 
-func (client *Client) prepare(req *request) error { - // Copy so they can be mutated without affecting on retries. - headers := copyHeader(req.headers) - if len(client.SecurityToken) != 0 { - headers.Set("x-oss-security-token", client.SecurityToken) - } - - params := make(url.Values) - - for k, v := range req.params { - params[k] = v - } - - req.params = params - req.headers = headers - - if !req.prepared { - req.prepared = true - if req.method == "" { - req.method = "GET" - } - - if !strings.HasPrefix(req.path, "/") { - req.path = "/" + req.path - } - - err := client.setBaseURL(req) - if err != nil { - return err - } - } - - req.headers.Set("Date", util.GetGMTime()) - client.signRequest(req) - - return nil -} - -// Prepares an *http.Request for doHttpRequest -func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { - // Copy so that signing the http request will not mutate it - - u, err := req.url() - if err != nil { - return nil, err - } - u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) - - hreq := http.Request{ - URL: u, - Method: req.method, - ProtoMajor: 1, - ProtoMinor: 1, - Close: true, - Header: req.headers, - Form: req.params, - } - - hreq.Header.Set("X-SDK-Client", `AliyunGO/`+common.Version) - - contentLength := req.headers.Get("Content-Length") - - if contentLength != "" { - hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64) - req.headers.Del("Content-Length") - } - - if req.payload != nil { - hreq.Body = ioutil.NopCloser(req.payload) - } - - return &hreq, nil -} - -// doHttpRequest sends hreq and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) { - - if true { - log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String()) - } - hresp, err := c.Do(hreq) - if err != nil { - return nil, err - } - if client.debug { - log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode) - contentType := hresp.Header.Get("Content-Type") - if contentType == "application/xml" || contentType == "text/xml" { - dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("%s\n", dump) - } else { - log.Printf("Response Content-Type: %s\n", contentType) - } - } - if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { - return nil, client.buildError(hresp) - } - if resp != nil { - err = xml.NewDecoder(hresp.Body).Decode(resp) - hresp.Body.Close() - - if client.debug { - log.Printf("aliyungo.oss> decoded xml into %#v", resp) - } - - } - return hresp, err -} - -// run sends req and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. 
-func (client *Client) run(req *request, resp interface{}) (*http.Response, error) { - if client.debug { - log.Printf("Running OSS request: %#v", req) - } - - hreq, err := client.setupHttpRequest(req) - if err != nil { - return nil, err - } - - c := &http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (c net.Conn, err error) { - if client.ConnectTimeout > 0 { - c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) - } else { - c, err = net.Dial(netw, addr) - } - if err != nil { - return - } - return - }, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: req.timeout, - } - - return client.doHttpRequest(c, hreq, resp) -} - -// Error represents an error in an operation with OSS. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // OSS error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) -} - -func (client *Client) buildError(r *http.Response) error { - if client.debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if client.debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -type TimeoutError interface { - error - Timeout() bool // Is the error a timeout? -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - - _, ok := err.(TimeoutError) - if ok { - return true - } - - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *url.Error: - // url.Error can be returned either by net/url if a URL cannot be - // parsed, or by net/http if the response is closed before the headers - // are received or parsed correctly. In that later case, e.Op is set to - // the HTTP method name with the first letter uppercased. We don't want - // to retry on POST operations, since those are not idempotent, all the - // other ones should be safe to retry. 
- switch e.Op { - case "Get", "Put", "Delete", "Head": - return shouldRetry(e.Err) - default: - return false - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - e, ok := err.(*Error) - return ok && e.Code == code -} - -func copyHeader(header http.Header) (newHeader http.Header) { - newHeader = make(http.Header) - for k, v := range header { - newSlice := make([]string, len(v)) - copy(newSlice, v) - newHeader[k] = newSlice - } - return -} - -type AccessControlPolicy struct { - Owner Owner - Grants []string `xml:"AccessControlList>Grant"` -} - -// ACL returns ACL of bucket -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucketAcl -func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { - - params := make(url.Values) - params.Set("acl", "") - - r, err := b.GetWithParams("/", params) - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp AccessControlPolicy - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -func (b *Bucket) GetContentLength(sourcePath string) (int64, error) { - resp, err := b.Head(sourcePath, nil) - if err != nil { - return 0, err - } - - currentLength := resp.ContentLength - - return currentLength, err -} - -func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error { - return b.CopyLargeFileInParallel(sourcePath, destPath, contentType, perm, options, 1) -} - -const defaultChunkSize = int64(128 * 1024 * 1024) //128MB -const maxCopytSize = int64(128 * 1024 * 1024) //128MB - -// Copy large file in the same bucket -func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, contentType string, perm ACL, options Options, maxConcurrency int) error { - - if maxConcurrency < 1 { - maxConcurrency = 1 - } - - currentLength, err := b.GetContentLength(sourcePath) - - log.Printf("Parallel Copy large file[size: %d] from %s to %s\n",currentLength, sourcePath, destPath) - - if err != nil { - return err - } - - if currentLength < maxCopytSize { - _, err := b.PutCopy(destPath, perm, - CopyOptions{}, - b.Path(sourcePath)) - return err - } - - multi, err := b.InitMulti(destPath, contentType, perm, options) - if err != nil { - return err - } - - numParts := (currentLength + defaultChunkSize - 1) / defaultChunkSize - completedParts := make([]Part, numParts) - - errChan := make(chan error, numParts) - limiter := make(chan struct{}, maxConcurrency) - - var start int64 = 0 - var to int64 = 0 - var partNumber = 0 - sourcePathForCopy := b.Path(sourcePath) - - for start = 0; start < currentLength; start = to { - to = start + defaultChunkSize - if to > currentLength { - to = currentLength - } - partNumber++ - - rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1) - limiter <- struct{}{} - go func(partNumber int, rangeStr string) { - _, part, err := multi.PutPartCopyWithContentLength(partNumber, - CopyOptions{CopySourceOptions: rangeStr}, - sourcePathForCopy, currentLength) - if err == nil { - completedParts[partNumber-1] = part - } else { - log.Printf("Unable in PutPartCopy of part %d for %s: %v\n", partNumber, sourcePathForCopy, err) - } - errChan <- err - <-limiter - }(partNumber, rangeStr) - } - - fullyCompleted := true - for range completedParts { - err := <-errChan - if err != nil { - fullyCompleted = false - } - } - - if fullyCompleted { - err = multi.Complete(completedParts) 
- } else { - err = multi.Abort() - } - - return err -} diff --git a/vendor/github.com/denverdino/aliyungo/oss/export.go b/vendor/github.com/denverdino/aliyungo/oss/export.go deleted file mode 100644 index ebdb047..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/export.go +++ /dev/null @@ -1,23 +0,0 @@ -package oss - -import ( - "github.com/denverdino/aliyungo/util" -) - -var originalStrategy = attempts - -func SetAttemptStrategy(s *util.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff --git a/vendor/github.com/denverdino/aliyungo/oss/multi.go b/vendor/github.com/denverdino/aliyungo/oss/multi.go deleted file mode 100644 index d720e18..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/multi.go +++ /dev/null @@ -1,489 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "time" - //"log" - "net/http" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. - -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := make(url.Values) - params.Set("uploads", "") - params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) - params.Set("prefix", prefix) - params.Set("delimiter", delim) - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params.Set("key-marker", resp.NextKeyMarker) - params.Set("upload-id-marker", resp.NextUploadIdMarker) - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. 
-func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload -func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := make(http.Header) - headers.Set("Content-Length", "0") - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploads", "") - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.Client.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - return m.PutPartCopyWithContentLength(n, options, source, -1) -} - -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy -func (m *Multi) PutPartCopyWithContentLength(n int, options CopyOptions, source string, contentLength int64) (*CopyObjectResult, Part, error) { - // TODO source format a /BUCKET/PATH/TO/OBJECT - // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) - - headers := make(http.Header) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - if contentLength < 0 { - sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) - //log.Println("source: ", source) - //log.Println("sourceBucket: ", sourceBucket.Name) - //log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2]) - // TODO SplitAfterN can be use in bucket name - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil) - if err != nil { - return nil, Part{}, err - } - contentLength = sourceMeta.ContentLength - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err := m.Bucket.Client.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, contentLength}, nil - } - panic("unreachable") -} - -// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. 
-// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64, 0) -} - -func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64, timeout) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) - headers.Set("Content-MD5", md5b64) - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - timeout: timeout, - } - err = m.Bucket.Client.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// ListParts for backcompatability. See the documentation for ListPartsFull -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListPartsFull returns the list of previously uploaded parts in m, -// ordered by part number (Only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. 
-// -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10)) - params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10)) - - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) - if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params.Set("part-number-marker", resp.NextPartNumberMarker) - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64, 0) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. 
-// -func (m *Multi) Complete(parts []Part) error { - params := make(url.Values) - params.Set("uploadId", m.UploadId) - - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - err := m.Bucket.Client.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} - -// Abort deletes an unifinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload -func (m *Multi) Abort() error { - params := make(url.Values) - params.Set("uploadId", m.UploadId) - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.Client.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff --git a/vendor/github.com/denverdino/aliyungo/oss/regions.go b/vendor/github.com/denverdino/aliyungo/oss/regions.go deleted file mode 100644 index 048170c..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/regions.go +++ /dev/null @@ -1,78 +0,0 @@ -package oss - -import ( - "fmt" -) - -// Region represents OSS region -type Region string - -// Constants of region definition -const ( - Hangzhou = Region("oss-cn-hangzhou") - Qingdao = Region("oss-cn-qingdao") - Beijing = Region("oss-cn-beijing") - Hongkong = Region("oss-cn-hongkong") - Shenzhen = Region("oss-cn-shenzhen") - Shanghai = Region("oss-cn-shanghai") - Zhangjiakou = Region("oss-cn-zhangjiakou") - - USWest1 = Region("oss-us-west-1") - USEast1 = Region("oss-us-east-1") - APSouthEast1 = Region("oss-ap-southeast-1") - APNorthEast1 = Region("oss-ap-northeast-1") - APSouthEast2 = Region("oss-ap-southeast-2") - - MEEast1 = Region("oss-me-east-1") - - EUCentral1 = Region("oss-eu-central-1") - - DefaultRegion = Hangzhou -) - -// GetEndpoint returns endpoint of region -func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string { - if internal { - return r.GetInternalEndpoint(bucket, secure) - } - return r.GetInternetEndpoint(bucket, secure) -} - -func getProtocol(secure bool) string { - protocol := "http" - if secure { - protocol = "https" - } - return protocol -} - -// GetInternetEndpoint returns internet endpoint of region -func (r Region) GetInternetEndpoint(bucket string, secure bool) string { - protocol := getProtocol(secure) - if bucket == "" { - return fmt.Sprintf("%s://oss.aliyuncs.com", protocol) - } - return 
fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r)) -} - -// GetInternalEndpoint returns internal endpoint of region -func (r Region) GetInternalEndpoint(bucket string, secure bool) string { - protocol := getProtocol(secure) - if bucket == "" { - return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol) - } - return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r)) -} - -// GetInternalEndpoint returns internal endpoint of region -func (r Region) GetVPCInternalEndpoint(bucket string, secure bool) string { - protocol := getProtocol(secure) - if bucket == "" { - return fmt.Sprintf("%s://vpc100-oss-cn-hangzhou.aliyuncs.com", protocol) - } - if r == USEast1 { - return r.GetInternalEndpoint(bucket, secure) - } else { - return fmt.Sprintf("%s://%s.vpc100-%s.aliyuncs.com", protocol, bucket, string(r)) - } -} diff --git a/vendor/github.com/denverdino/aliyungo/oss/signature.go b/vendor/github.com/denverdino/aliyungo/oss/signature.go deleted file mode 100644 index 1267717..0000000 --- a/vendor/github.com/denverdino/aliyungo/oss/signature.go +++ /dev/null @@ -1,107 +0,0 @@ -package oss - -import ( - "github.com/denverdino/aliyungo/util" - //"log" - "net/http" - "net/url" - "sort" - "strings" -) - -const HeaderOSSPrefix = "x-oss-" - -var ossParamsToSign = map[string]bool{ - "acl": true, - "delete": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, - "bucketInfo": true, -} - -func (client *Client) signRequest(request *request) { - query := request.params - - urlSignature := query.Get("OSSAccessKeyId") != "" - - headers := request.headers - contentMd5 := headers.Get("Content-Md5") - contentType := headers.Get("Content-Type") - date := "" - if urlSignature { - date = query.Get("Expires") - } else { - date = headers.Get("Date") - } - - resource := request.path - if request.bucket != "" { - resource = "/" + request.bucket + request.path - } - params := make(url.Values) - for k, v := range query { - if ossParamsToSign[k] { - params[k] = v - } - } - - if len(params) > 0 { - resource = resource + "?" + util.Encode(params) - } - - canonicalizedResource := resource - - _, canonicalizedHeader := canonicalizeHeader(headers) - - stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource - - //log.Println("stringToSign: ", stringToSign) - signature := util.CreateSignature(stringToSign, client.AccessKeySecret) - - if query.Get("OSSAccessKeyId") != "" { - query.Set("Signature", signature) - } else { - headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature) - } -} - -//Have to break the abstraction to append keys with lower case. 
-func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) { - var canonicalizedHeaders []string - newHeaders = http.Header{} - - for k, v := range headers { - if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) { - newHeaders[lower] = v - canonicalizedHeaders = append(canonicalizedHeaders, lower) - } else { - newHeaders[k] = v - } - } - - sort.Strings(canonicalizedHeaders) - - var canonicalizedHeader string - - for _, k := range canonicalizedHeaders { - canonicalizedHeader += k + ":" + headers.Get(k) + "\n" - } - - return newHeaders, canonicalizedHeader -} diff --git a/vendor/github.com/denverdino/aliyungo/util/attempt.go b/vendor/github.com/denverdino/aliyungo/util/attempt.go deleted file mode 100644 index 2d07f03..0000000 --- a/vendor/github.com/denverdino/aliyungo/util/attempt.go +++ /dev/null @@ -1,76 +0,0 @@ -package util - -import ( - "time" -) - -// AttemptStrategy is reused from the goamz package - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/denverdino/aliyungo/util/encoding.go deleted file mode 100644 index e545e06..0000000 --- a/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ /dev/null @@ -1,152 +0,0 @@ -package util - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strconv" - "time" -) - -//ConvertToQueryValues converts the struct to url.Values -func ConvertToQueryValues(ifc interface{}) url.Values { - values := url.Values{} - SetQueryValues(ifc, &values) - return values -} - -//SetQueryValues sets the struct to existing url.Values following ECS encoding rules -func SetQueryValues(ifc interface{}, values *url.Values) { - setQueryValues(ifc, values, "") -} - -func setQueryValues(i interface{}, values *url.Values, prefix string) { - // add to support url.Values - mapValues, ok := i.(url.Values) - if ok { - for k, _ := range mapValues { - values.Set(k, mapValues.Get(k)) - } - return - } - - elem := reflect.ValueOf(i) - if elem.Kind() == reflect.Ptr { - elem = elem.Elem() - } - elemType := elem.Type() - for i := 0; i < elem.NumField(); i++ { - - fieldName := elemType.Field(i).Name - anonymous := elemType.Field(i).Anonymous - field := elem.Field(i) - // TODO Use Tag for validation - // tag := typ.Field(i).Tag.Get("tagname") - kind := field.Kind() - if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { - continue - } - if kind == reflect.Ptr { - field = field.Elem() - kind = field.Kind() - } - var value string - //switch field.Interface().(type) { - switch kind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i := field.Int() - if i != 0 { - value = strconv.FormatInt(i, 10) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - i := field.Uint() - if i != 0 { - value = strconv.FormatUint(i, 10) - } - case reflect.Float32: - value = strconv.FormatFloat(field.Float(), 'f', 4, 32) - case reflect.Float64: - value = strconv.FormatFloat(field.Float(), 'f', 4, 64) - case reflect.Bool: - value = strconv.FormatBool(field.Bool()) - case reflect.String: - value = field.String() - case reflect.Map: - ifc := field.Interface() - m := ifc.(map[string]string) - if m != nil { - j := 0 - for k, v := range m { - j++ - keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) - values.Set(keyName, k) - valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) - values.Set(valueName, v) - } - } - case reflect.Slice: - switch field.Type().Elem().Kind() { - case reflect.Uint8: - value = string(field.Bytes()) - case reflect.String: - l := field.Len() - if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() - } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) - } else { - log.Printf("Failed to convert JSON: %v", err) - } - } - default: - l := field.Len() - for j := 0; j < l; j++ { - prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) - ifc := field.Index(j).Interface() - //log.Printf("%s : %v", prefixName, ifc) - if ifc != nil { - setQueryValues(ifc, values, prefixName) - } - } - continue - } - - default: - switch field.Interface().(type) { - case ISO6801Time: - t := 
field.Interface().(ISO6801Time) - value = t.String() - case time.Time: - t := field.Interface().(time.Time) - value = GetISO8601TimeStamp(t) - default: - ifc := field.Interface() - if ifc != nil { - if anonymous { - SetQueryValues(ifc, values) - } else { - prefixName := fieldName + "." - setQueryValues(ifc, values, prefixName) - } - continue - } - } - } - if value != "" { - name := elemType.Field(i).Tag.Get("ArgName") - if name == "" { - name = fieldName - } - if prefix != "" { - name = prefix + name - } - values.Set(name, value) - } - } -} diff --git a/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/denverdino/aliyungo/util/iso6801.go deleted file mode 100644 index 9c25e8f..0000000 --- a/vendor/github.com/denverdino/aliyungo/util/iso6801.go +++ /dev/null @@ -1,80 +0,0 @@ -package util - -import ( - "fmt" - "strconv" - "time" -) - -// GetISO8601TimeStamp gets timestamp string in ISO8601 format -func GetISO8601TimeStamp(ts time.Time) string { - t := ts.UTC() - return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) -} - -const formatISO8601 = "2006-01-02T15:04:05Z" -const jsonFormatISO8601 = `"` + formatISO8601 + `"` -const formatISO8601withoutSeconds = "2006-01-02T15:04Z" -const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"` - -// A ISO6801Time represents a time in ISO8601 format -type ISO6801Time time.Time - -// New constructs a new iso8601.Time instance from an existing -// time.Time instance. This causes the nanosecond field to be set to -// 0, and its time zone set to a fixed zone with no offset from UTC -// (but it is *not* UTC itself). -func NewISO6801Time(t time.Time) ISO6801Time { - return ISO6801Time(time.Date( - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - 0, - time.UTC, - )) -} - -// IsDefault checks if the time is default -func (it *ISO6801Time) IsDefault() bool { - return *it == ISO6801Time{} -} - -// MarshalJSON serializes the ISO6801Time into JSON string -func (it ISO6801Time) MarshalJSON() ([]byte, error) { - return []byte(time.Time(it).Format(jsonFormatISO8601)), nil -} - -// UnmarshalJSON deserializes the ISO6801Time from JSON string -func (it *ISO6801Time) UnmarshalJSON(data []byte) error { - str := string(data) - - if str == "\"\"" || len(data) == 0 { - return nil - } - var t time.Time - var err error - if str[0] == '"' { - t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC) - if err != nil { - t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC) - } - } else { - var i int64 - i, err = strconv.ParseInt(str, 10, 64) - if err == nil { - t = time.Unix(i/1000, i%1000) - } - } - if err == nil { - *it = ISO6801Time(t) - } - return err -} - -// String returns the time in ISO6801Time format -func (it ISO6801Time) String() string { - return time.Time(it).Format(formatISO8601) -} diff --git a/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/denverdino/aliyungo/util/signature.go deleted file mode 100644 index a00b27c..0000000 --- a/vendor/github.com/denverdino/aliyungo/util/signature.go +++ /dev/null @@ -1,40 +0,0 @@ -package util - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "net/url" - "strings" -) - -//CreateSignature creates signature for string following Aliyun rules -func CreateSignature(stringToSignature, accessKeySecret string) string { - // Crypto by HMAC-SHA1 - hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret)) - 
hmacSha1.Write([]byte(stringToSignature)) - sign := hmacSha1.Sum(nil) - - // Encode to Base64 - base64Sign := base64.StdEncoding.EncodeToString(sign) - - return base64Sign -} - -func percentReplace(str string) string { - str = strings.Replace(str, "+", "%20", -1) - str = strings.Replace(str, "*", "%2A", -1) - str = strings.Replace(str, "%7E", "~", -1) - - return str -} - -// CreateSignatureForRequest creates signature for query string values -func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string { - - canonicalizedQueryString := percentReplace(values.Encode()) - - stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) - - return CreateSignature(stringToSign, accessKeySecret) -} diff --git a/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/denverdino/aliyungo/util/util.go deleted file mode 100644 index dd68214..0000000 --- a/vendor/github.com/denverdino/aliyungo/util/util.go +++ /dev/null @@ -1,147 +0,0 @@ -package util - -import ( - "bytes" - srand "crypto/rand" - "encoding/binary" - "math/rand" - "net/http" - "net/url" - "sort" - "time" -) - -const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - -//CreateRandomString create random string -func CreateRandomString() string { - b := make([]byte, 32) - l := len(dictionary) - - _, err := srand.Read(b) - - if err != nil { - // fail back to insecure rand - rand.Seed(time.Now().UnixNano()) - for i := range b { - b[i] = dictionary[rand.Int()%l] - } - } else { - for i, v := range b { - b[i] = dictionary[v%byte(l)] - } - } - - return string(b) -} - -// Encode encodes the values into ``URL encoded'' form -// ("acl&bar=baz&foo=quux") sorted by key. -func Encode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - if v != "" { - buf.WriteString("=") - buf.WriteString(url.QueryEscape(v)) - } - } - } - return buf.String() -} - -func GetGMTime() string { - return time.Now().UTC().Format(http.TimeFormat) -} - -// - -func randUint32() uint32 { - return randUint32Slice(1)[0] -} - -func randUint32Slice(c int) []uint32 { - b := make([]byte, c*4) - - _, err := srand.Read(b) - - if err != nil { - // fail back to insecure rand - rand.Seed(time.Now().UnixNano()) - for i := range b { - b[i] = byte(rand.Int()) - } - } - - n := make([]uint32, c) - - for i := range n { - n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4]) - } - - return n -} - -func toByte(n uint32, st, ed byte) byte { - return byte(n%uint32(ed-st+1) + uint32(st)) -} - -func toDigit(n uint32) byte { - return toByte(n, '0', '9') -} - -func toLowerLetter(n uint32) byte { - return toByte(n, 'a', 'z') -} - -func toUpperLetter(n uint32) byte { - return toByte(n, 'A', 'Z') -} - -type convFunc func(uint32) byte - -var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter} - -// tools for generating a random ECS instance password -// from 8 to 30 char MUST contain digit upper, case letter and upper case letter -// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance -func GenerateRandomECSPassword() string { - - // [8, 30] - l := int(randUint32()%23 + 8) - - n := randUint32Slice(l) - - b := make([]byte, l) - - b[0] = toDigit(n[0]) - b[1] = toLowerLetter(n[1]) - b[2] = 
toUpperLetter(n[2]) - - for i := 3; i < l; i++ { - b[i] = convFuncs[n[i]%3](n[i]) - } - - s := make([]byte, l) - perm := rand.Perm(l) - for i, v := range perm { - s[v] = b[i] - } - - return string(s) - -} diff --git a/vendor/github.com/disintegration/imaging/LICENSE b/vendor/github.com/disintegration/imaging/LICENSE deleted file mode 100755 index 95ae410..0000000 --- a/vendor/github.com/disintegration/imaging/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2014 Grigory Dryapak - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/disintegration/imaging/README.md b/vendor/github.com/disintegration/imaging/README.md deleted file mode 100755 index 3dcea20..0000000 --- a/vendor/github.com/disintegration/imaging/README.md +++ /dev/null @@ -1,198 +0,0 @@ -# Imaging - -[![GoDoc](https://godoc.org/github.com/disintegration/imaging?status.svg)](https://godoc.org/github.com/disintegration/imaging) -[![Build Status](https://travis-ci.org/disintegration/imaging.svg?branch=master)](https://travis-ci.org/disintegration/imaging) -[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master)](https://coveralls.io/github/disintegration/imaging?branch=master) - -Package imaging provides basic image manipulation functions (resize, rotate, flip, crop, etc.). -This package is based on the standard Go image package and works best along with it. - -Image manipulation functions provided by the package take any image type -that implements `image.Image` interface as an input, and return a new image of -`*image.NRGBA` type (32bit RGBA colors, not premultiplied by alpha). - -## Installation - -Imaging requires Go version 1.2 or greater. - - go get -u github.com/disintegration/imaging - -## Documentation - -http://godoc.org/github.com/disintegration/imaging - -## Usage examples - -A few usage examples can be found below. See the documentation for the full list of supported functions. 
- -### Image resizing -```go -// resize srcImage to size = 128x128px using the Lanczos filter -dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos) - -// resize srcImage to width = 800px preserving the aspect ratio -dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos) - -// scale down srcImage to fit the 800x600px bounding box -dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) - -// resize and crop the srcImage to fill the 100x100px area -dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos) -``` - -Imaging supports image resizing using various resampling filters. The most notable ones: -- `NearestNeighbor` - Fastest resampling filter, no antialiasing. -- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor. -- `Linear` - Bilinear filter, smooth and reasonably fast. -- `MitchellNetravali` - А smooth bicubic filter. -- `CatmullRom` - A sharp bicubic filter. -- `Gaussian` - Blurring filter that uses gaussian function, useful for noise removal. -- `Lanczos` - High-quality resampling filter for photographic images yielding sharp results, but it's slower than cubic filters. - -The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using ResampleFilter struct. - -**Resampling filters comparison** - -Original image. Will be resized from 512x512px to 128x128px. - -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_512.png) - -Filter | Resize result ----|--- -`imaging.NearestNeighbor` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_nearest.png) -`imaging.Box` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_box.png) -`imaging.Linear` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_linear.png) -`imaging.MitchellNetravali` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_mitchell.png) -`imaging.CatmullRom` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_catrom.png) -`imaging.Gaussian` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_gaussian.png) -`imaging.Lanczos` | ![dstImage](http://disintegration.github.io/imaging/out_resize_down_lanczos.png) - -**Resize functions comparison** - -Original image: - -![srcImage](http://disintegration.github.io/imaging/in.jpg) - -Resize the image to width=100px and height=100px: - -```go -dstImage := imaging.Resize(srcImage, 100, 100, imaging.Lanczos) -``` -![dstImage](http://disintegration.github.io/imaging/out-comp-resize.jpg) - -Resize the image to width=100px preserving the aspect ratio: - -```go -dstImage := imaging.Resize(srcImage, 100, 0, imaging.Lanczos) -``` -![dstImage](http://disintegration.github.io/imaging/out-comp-fit.jpg) - -Resize the image to fit the 100x100px boundng box preserving the aspect ratio: - -```go -dstImage := imaging.Fit(srcImage, 100, 100, imaging.Lanczos) -``` -![dstImage](http://disintegration.github.io/imaging/out-comp-fit.jpg) - -Resize and crop the image with a center anchor point to fill the 100x100px area: - -```go -dstImage := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos) -``` -![dstImage](http://disintegration.github.io/imaging/out-comp-fill.jpg) - -### Gaussian Blur -```go -dstImage := imaging.Blur(srcImage, 0.5) -``` - -Sigma parameter allows to control the strength of the blurring 
effect. - -Original image | Sigma = 0.5 | Sigma = 1.5 ----|---|--- -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_blur_0.5.png) | ![dstImage](http://disintegration.github.io/imaging/out_blur_1.5.png) - -### Sharpening -```go -dstImage := imaging.Sharpen(srcImage, 0.5) -``` - -Uses gaussian function internally. Sigma parameter allows to control the strength of the sharpening effect. - -Original image | Sigma = 0.5 | Sigma = 1.5 ----|---|--- -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_sharpen_0.5.png) | ![dstImage](http://disintegration.github.io/imaging/out_sharpen_1.5.png) - -### Gamma correction -```go -dstImage := imaging.AdjustGamma(srcImage, 0.75) -``` - -Original image | Gamma = 0.75 | Gamma = 1.25 ----|---|--- -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_gamma_0.75.png) | ![dstImage](http://disintegration.github.io/imaging/out_gamma_1.25.png) - -### Contrast adjustment -```go -dstImage := imaging.AdjustContrast(srcImage, 20) -``` - -Original image | Contrast = 20 | Contrast = -20 ----|---|--- -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_contrast_p20.png) | ![dstImage](http://disintegration.github.io/imaging/out_contrast_m20.png) - -### Brightness adjustment -```go -dstImage := imaging.AdjustBrightness(srcImage, 20) -``` - -Original image | Brightness = 20 | Brightness = -20 ----|---|--- -![srcImage](http://disintegration.github.io/imaging/in_lena_bw_128.png) | ![dstImage](http://disintegration.github.io/imaging/out_brightness_p20.png) | ![dstImage](http://disintegration.github.io/imaging/out_brightness_m20.png) - - -### Complete code example -Here is the code example that loads several images, makes thumbnails of them -and combines them together side-by-side. - -```go -package main - -import ( - "image" - "image/color" - - "github.com/disintegration/imaging" -) - -func main() { - - // input files - files := []string{"01.jpg", "02.jpg", "03.jpg"} - - // load images and make 100x100 thumbnails of them - var thumbnails []image.Image - for _, file := range files { - img, err := imaging.Open(file) - if err != nil { - panic(err) - } - thumb := imaging.Thumbnail(img, 100, 100, imaging.CatmullRom) - thumbnails = append(thumbnails, thumb) - } - - // create a new blank image - dst := imaging.New(100*len(thumbnails), 100, color.NRGBA{0, 0, 0, 0}) - - // paste thumbnails into the new image side by side - for i, thumb := range thumbnails { - dst = imaging.Paste(dst, thumb, image.Pt(i*100, 0)) - } - - // save the combined image to file - err := imaging.Save(dst, "dst.jpg") - if err != nil { - panic(err) - } -} -``` diff --git a/vendor/github.com/disintegration/imaging/adjust.go b/vendor/github.com/disintegration/imaging/adjust.go deleted file mode 100755 index 9b1b83a..0000000 --- a/vendor/github.com/disintegration/imaging/adjust.go +++ /dev/null @@ -1,200 +0,0 @@ -package imaging - -import ( - "image" - "image/color" - "math" -) - -// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image. 
-// -// Example: -// -// dstImage = imaging.AdjustFunc( -// srcImage, -// func(c color.NRGBA) color.NRGBA { -// // shift the red channel by 16 -// r := int(c.R) + 16 -// if r > 255 { -// r = 255 -// } -// return color.NRGBA{uint8(r), c.G, c.B, c.A} -// } -// ) -// -func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA { - src := toNRGBA(img) - width := src.Bounds().Max.X - height := src.Bounds().Max.Y - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) - - parallel(height, func(partStart, partEnd int) { - for y := partStart; y < partEnd; y++ { - for x := 0; x < width; x++ { - i := y*src.Stride + x*4 - j := y*dst.Stride + x*4 - - r := src.Pix[i+0] - g := src.Pix[i+1] - b := src.Pix[i+2] - a := src.Pix[i+3] - - c := fn(color.NRGBA{r, g, b, a}) - - dst.Pix[j+0] = c.R - dst.Pix[j+1] = c.G - dst.Pix[j+2] = c.B - dst.Pix[j+3] = c.A - } - } - }) - - return dst -} - -// AdjustGamma performs a gamma correction on the image and returns the adjusted image. -// Gamma parameter must be positive. Gamma = 1.0 gives the original image. -// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it. -// -// Example: -// -// dstImage = imaging.AdjustGamma(srcImage, 0.7) -// -func AdjustGamma(img image.Image, gamma float64) *image.NRGBA { - e := 1.0 / math.Max(gamma, 0.0001) - lut := make([]uint8, 256) - - for i := 0; i < 256; i++ { - lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0) - } - - fn := func(c color.NRGBA) color.NRGBA { - return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} - } - - return AdjustFunc(img, fn) -} - -func sigmoid(a, b, x float64) float64 { - return 1 / (1 + math.Exp(b*(a-x))) -} - -// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image. -// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail. -// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5. -// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10). -// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased. -// -// Examples: -// -// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // increase the contrast -// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // decrease the contrast -// -func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA { - if factor == 0 { - return Clone(img) - } - - lut := make([]uint8, 256) - a := math.Min(math.Max(midpoint, 0.0), 1.0) - b := math.Abs(factor) - sig0 := sigmoid(a, b, 0) - sig1 := sigmoid(a, b, 1) - e := 1.0e-6 - - if factor > 0 { - for i := 0; i < 256; i++ { - x := float64(i) / 255.0 - sigX := sigmoid(a, b, x) - f := (sigX - sig0) / (sig1 - sig0) - lut[i] = clamp(f * 255.0) - } - } else { - for i := 0; i < 256; i++ { - x := float64(i) / 255.0 - arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e) - f := a - math.Log(1.0/arg-1.0)/b - lut[i] = clamp(f * 255.0) - } - } - - fn := func(c color.NRGBA) color.NRGBA { - return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} - } - - return AdjustFunc(img, fn) -} - -// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image. -// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. -// The percentage = -100 gives solid grey image. 
-// -// Examples: -// -// dstImage = imaging.AdjustContrast(srcImage, -10) // decrease image contrast by 10% -// dstImage = imaging.AdjustContrast(srcImage, 20) // increase image contrast by 20% -// -func AdjustContrast(img image.Image, percentage float64) *image.NRGBA { - percentage = math.Min(math.Max(percentage, -100.0), 100.0) - lut := make([]uint8, 256) - - v := (100.0 + percentage) / 100.0 - for i := 0; i < 256; i++ { - if 0 <= v && v <= 1 { - lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0) - } else if 1 < v && v < 2 { - lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0) - } else { - lut[i] = uint8(float64(i)/255.0+0.5) * 255 - } - } - - fn := func(c color.NRGBA) color.NRGBA { - return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} - } - - return AdjustFunc(img, fn) -} - -// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image. -// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. -// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image. -// -// Examples: -// -// dstImage = imaging.AdjustBrightness(srcImage, -15) // decrease image brightness by 15% -// dstImage = imaging.AdjustBrightness(srcImage, 10) // increase image brightness by 10% -// -func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA { - percentage = math.Min(math.Max(percentage, -100.0), 100.0) - lut := make([]uint8, 256) - - shift := 255.0 * percentage / 100.0 - for i := 0; i < 256; i++ { - lut[i] = clamp(float64(i) + shift) - } - - fn := func(c color.NRGBA) color.NRGBA { - return color.NRGBA{lut[c.R], lut[c.G], lut[c.B], c.A} - } - - return AdjustFunc(img, fn) -} - -// Grayscale produces grayscale version of the image. -func Grayscale(img image.Image) *image.NRGBA { - fn := func(c color.NRGBA) color.NRGBA { - f := 0.299*float64(c.R) + 0.587*float64(c.G) + 0.114*float64(c.B) - y := uint8(f + 0.5) - return color.NRGBA{y, y, y, c.A} - } - return AdjustFunc(img, fn) -} - -// Invert produces inverted (negated) version of the image. -func Invert(img image.Image) *image.NRGBA { - fn := func(c color.NRGBA) color.NRGBA { - return color.NRGBA{255 - c.R, 255 - c.G, 255 - c.B, c.A} - } - return AdjustFunc(img, fn) -} diff --git a/vendor/github.com/disintegration/imaging/effects.go b/vendor/github.com/disintegration/imaging/effects.go deleted file mode 100755 index 19d6e40..0000000 --- a/vendor/github.com/disintegration/imaging/effects.go +++ /dev/null @@ -1,189 +0,0 @@ -package imaging - -import ( - "image" - "math" -) - -func gaussianBlurKernel(x, sigma float64) float64 { - return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi)) -} - -// Blur produces a blurred version of the image using a Gaussian function. -// Sigma parameter must be positive and indicates how much the image will be blurred. -// -// Usage example: -// -// dstImage := imaging.Blur(srcImage, 3.5) -// -func Blur(img image.Image, sigma float64) *image.NRGBA { - if sigma <= 0 { - // sigma parameter must be positive! 
- return Clone(img) - } - - src := toNRGBA(img) - radius := int(math.Ceil(sigma * 3.0)) - kernel := make([]float64, radius+1) - - for i := 0; i <= radius; i++ { - kernel[i] = gaussianBlurKernel(float64(i), sigma) - } - - var dst *image.NRGBA - dst = blurHorizontal(src, kernel) - dst = blurVertical(dst, kernel) - - return dst -} - -func blurHorizontal(src *image.NRGBA, kernel []float64) *image.NRGBA { - radius := len(kernel) - 1 - width := src.Bounds().Max.X - height := src.Bounds().Max.Y - - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) - - parallel(width, func(partStart, partEnd int) { - for x := partStart; x < partEnd; x++ { - start := x - radius - if start < 0 { - start = 0 - } - - end := x + radius - if end > width-1 { - end = width - 1 - } - - weightSum := 0.0 - for ix := start; ix <= end; ix++ { - weightSum += kernel[absint(x-ix)] - } - - for y := 0; y < height; y++ { - - r, g, b, a := 0.0, 0.0, 0.0, 0.0 - for ix := start; ix <= end; ix++ { - weight := kernel[absint(x-ix)] - i := y*src.Stride + ix*4 - wa := float64(src.Pix[i+3]) * weight - r += float64(src.Pix[i+0]) * wa - g += float64(src.Pix[i+1]) * wa - b += float64(src.Pix[i+2]) * wa - a += wa - } - - r = math.Min(math.Max(r/a, 0.0), 255.0) - g = math.Min(math.Max(g/a, 0.0), 255.0) - b = math.Min(math.Max(b/a, 0.0), 255.0) - a = math.Min(math.Max(a/weightSum, 0.0), 255.0) - - j := y*dst.Stride + x*4 - dst.Pix[j+0] = uint8(r + 0.5) - dst.Pix[j+1] = uint8(g + 0.5) - dst.Pix[j+2] = uint8(b + 0.5) - dst.Pix[j+3] = uint8(a + 0.5) - - } - } - }) - - return dst -} - -func blurVertical(src *image.NRGBA, kernel []float64) *image.NRGBA { - radius := len(kernel) - 1 - width := src.Bounds().Max.X - height := src.Bounds().Max.Y - - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) - - parallel(height, func(partStart, partEnd int) { - for y := partStart; y < partEnd; y++ { - start := y - radius - if start < 0 { - start = 0 - } - - end := y + radius - if end > height-1 { - end = height - 1 - } - - weightSum := 0.0 - for iy := start; iy <= end; iy++ { - weightSum += kernel[absint(y-iy)] - } - - for x := 0; x < width; x++ { - - r, g, b, a := 0.0, 0.0, 0.0, 0.0 - for iy := start; iy <= end; iy++ { - weight := kernel[absint(y-iy)] - i := iy*src.Stride + x*4 - wa := float64(src.Pix[i+3]) * weight - r += float64(src.Pix[i+0]) * wa - g += float64(src.Pix[i+1]) * wa - b += float64(src.Pix[i+2]) * wa - a += wa - } - - r = math.Min(math.Max(r/a, 0.0), 255.0) - g = math.Min(math.Max(g/a, 0.0), 255.0) - b = math.Min(math.Max(b/a, 0.0), 255.0) - a = math.Min(math.Max(a/weightSum, 0.0), 255.0) - - j := y*dst.Stride + x*4 - dst.Pix[j+0] = uint8(r + 0.5) - dst.Pix[j+1] = uint8(g + 0.5) - dst.Pix[j+2] = uint8(b + 0.5) - dst.Pix[j+3] = uint8(a + 0.5) - - } - } - }) - - return dst -} - -// Sharpen produces a sharpened version of the image. -// Sigma parameter must be positive and indicates how much the image will be sharpened. -// -// Usage example: -// -// dstImage := imaging.Sharpen(srcImage, 3.5) -// -func Sharpen(img image.Image, sigma float64) *image.NRGBA { - if sigma <= 0 { - // sigma parameter must be positive! 
- return Clone(img) - } - - src := toNRGBA(img) - blurred := Blur(img, sigma) - - width := src.Bounds().Max.X - height := src.Bounds().Max.Y - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) - - parallel(height, func(partStart, partEnd int) { - for y := partStart; y < partEnd; y++ { - for x := 0; x < width; x++ { - i := y*src.Stride + x*4 - for j := 0; j < 4; j++ { - k := i + j - val := int(src.Pix[k]) + (int(src.Pix[k]) - int(blurred.Pix[k])) - if val < 0 { - val = 0 - } else if val > 255 { - val = 255 - } - dst.Pix[k] = uint8(val) - } - } - } - }) - - return dst -} diff --git a/vendor/github.com/disintegration/imaging/helpers.go b/vendor/github.com/disintegration/imaging/helpers.go deleted file mode 100755 index 79967ae..0000000 --- a/vendor/github.com/disintegration/imaging/helpers.go +++ /dev/null @@ -1,400 +0,0 @@ -/* -Package imaging provides basic image manipulation functions (resize, rotate, flip, crop, etc.). -This package is based on the standard Go image package and works best along with it. - -Image manipulation functions provided by the package take any image type -that implements `image.Image` interface as an input, and return a new image of -`*image.NRGBA` type (32bit RGBA colors, not premultiplied by alpha). -*/ -package imaging - -import ( - "errors" - "image" - "image/color" - "image/gif" - "image/jpeg" - "image/png" - "io" - "os" - "path/filepath" - "strings" - - "golang.org/x/image/bmp" - "golang.org/x/image/tiff" -) - -type Format int - -const ( - JPEG Format = iota - PNG - GIF - TIFF - BMP -) - -func (f Format) String() string { - switch f { - case JPEG: - return "JPEG" - case PNG: - return "PNG" - case GIF: - return "GIF" - case TIFF: - return "TIFF" - case BMP: - return "BMP" - default: - return "Unsupported" - } -} - -var ( - ErrUnsupportedFormat = errors.New("imaging: unsupported image format") -) - -// Decode reads an image from r. -func Decode(r io.Reader) (image.Image, error) { - img, _, err := image.Decode(r) - if err != nil { - return nil, err - } - return toNRGBA(img), nil -} - -// Open loads an image from file -func Open(filename string) (image.Image, error) { - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - img, err := Decode(file) - return img, err -} - -// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP). -func Encode(w io.Writer, img image.Image, format Format) error { - var err error - switch format { - case JPEG: - var rgba *image.RGBA - if nrgba, ok := img.(*image.NRGBA); ok { - if nrgba.Opaque() { - rgba = &image.RGBA{ - Pix: nrgba.Pix, - Stride: nrgba.Stride, - Rect: nrgba.Rect, - } - } - } - if rgba != nil { - err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: 95}) - } else { - err = jpeg.Encode(w, img, &jpeg.Options{Quality: 95}) - } - - case PNG: - err = png.Encode(w, img) - case GIF: - err = gif.Encode(w, img, &gif.Options{NumColors: 256}) - case TIFF: - err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true}) - case BMP: - err = bmp.Encode(w, img) - default: - err = ErrUnsupportedFormat - } - return err -} - -// Save saves the image to file with the specified filename. -// The format is determined from the filename extension: "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. 
-func Save(img image.Image, filename string) (err error) { - formats := map[string]Format{ - ".jpg": JPEG, - ".jpeg": JPEG, - ".png": PNG, - ".tif": TIFF, - ".tiff": TIFF, - ".bmp": BMP, - ".gif": GIF, - } - - ext := strings.ToLower(filepath.Ext(filename)) - f, ok := formats[ext] - if !ok { - return ErrUnsupportedFormat - } - - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - - return Encode(file, img, f) -} - -// New creates a new image with the specified width and height, and fills it with the specified color. -func New(width, height int, fillColor color.Color) *image.NRGBA { - if width <= 0 || height <= 0 { - return &image.NRGBA{} - } - - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) - c := color.NRGBAModel.Convert(fillColor).(color.NRGBA) - - if c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0 { - return dst - } - - cs := []uint8{c.R, c.G, c.B, c.A} - - // fill the first row - for x := 0; x < width; x++ { - copy(dst.Pix[x*4:(x+1)*4], cs) - } - // copy the first row to other rows - for y := 1; y < height; y++ { - copy(dst.Pix[y*dst.Stride:y*dst.Stride+width*4], dst.Pix[0:width*4]) - } - - return dst -} - -// Clone returns a copy of the given image. -func Clone(img image.Image) *image.NRGBA { - srcBounds := img.Bounds() - srcMinX := srcBounds.Min.X - srcMinY := srcBounds.Min.Y - - dstBounds := srcBounds.Sub(srcBounds.Min) - dstW := dstBounds.Dx() - dstH := dstBounds.Dy() - dst := image.NewNRGBA(dstBounds) - - switch src := img.(type) { - - case *image.NRGBA: - rowSize := srcBounds.Dx() * 4 - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize]) - } - }) - - case *image.NRGBA64: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - dst.Pix[di+0] = src.Pix[si+0] - dst.Pix[di+1] = src.Pix[si+2] - dst.Pix[di+2] = src.Pix[si+4] - dst.Pix[di+3] = src.Pix[si+6] - - di += 4 - si += 8 - - } - } - }) - - case *image.RGBA: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - a := src.Pix[si+3] - dst.Pix[di+3] = a - switch a { - case 0: - dst.Pix[di+0] = 0 - dst.Pix[di+1] = 0 - dst.Pix[di+2] = 0 - case 0xff: - dst.Pix[di+0] = src.Pix[si+0] - dst.Pix[di+1] = src.Pix[si+1] - dst.Pix[di+2] = src.Pix[si+2] - default: - var tmp uint16 - tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a) - dst.Pix[di+0] = uint8(tmp) - tmp = uint16(src.Pix[si+1]) * 0xff / uint16(a) - dst.Pix[di+1] = uint8(tmp) - tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a) - dst.Pix[di+2] = uint8(tmp) - } - - di += 4 - si += 4 - - } - } - }) - - case *image.RGBA64: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - a := src.Pix[si+6] - dst.Pix[di+3] = a - switch a { - case 0: - dst.Pix[di+0] = 0 - dst.Pix[di+1] = 0 - dst.Pix[di+2] = 0 - case 0xff: - dst.Pix[di+0] = src.Pix[si+0] - dst.Pix[di+1] = src.Pix[si+2] - dst.Pix[di+2] = src.Pix[si+4] - default: - var tmp uint16 - tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a) - dst.Pix[di+0] = 
uint8(tmp) - tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a) - dst.Pix[di+1] = uint8(tmp) - tmp = uint16(src.Pix[si+4]) * 0xff / uint16(a) - dst.Pix[di+2] = uint8(tmp) - } - - di += 4 - si += 8 - - } - } - }) - - case *image.Gray: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - c := src.Pix[si] - dst.Pix[di+0] = c - dst.Pix[di+1] = c - dst.Pix[di+2] = c - dst.Pix[di+3] = 0xff - - di += 4 - si += 1 - - } - } - }) - - case *image.Gray16: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - c := src.Pix[si] - dst.Pix[di+0] = c - dst.Pix[di+1] = c - dst.Pix[di+2] = c - dst.Pix[di+3] = 0xff - - di += 4 - si += 2 - - } - } - }) - - case *image.YCbCr: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - for dstX := 0; dstX < dstW; dstX++ { - - srcX := srcMinX + dstX - srcY := srcMinY + dstY - siy := src.YOffset(srcX, srcY) - sic := src.COffset(srcX, srcY) - r, g, b := color.YCbCrToRGB(src.Y[siy], src.Cb[sic], src.Cr[sic]) - dst.Pix[di+0] = r - dst.Pix[di+1] = g - dst.Pix[di+2] = b - dst.Pix[di+3] = 0xff - - di += 4 - - } - } - }) - - case *image.Paletted: - plen := len(src.Palette) - pnew := make([]color.NRGBA, plen) - for i := 0; i < plen; i++ { - pnew[i] = color.NRGBAModel.Convert(src.Palette[i]).(color.NRGBA) - } - - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - si := src.PixOffset(srcMinX, srcMinY+dstY) - for dstX := 0; dstX < dstW; dstX++ { - - c := pnew[src.Pix[si]] - dst.Pix[di+0] = c.R - dst.Pix[di+1] = c.G - dst.Pix[di+2] = c.B - dst.Pix[di+3] = c.A - - di += 4 - si += 1 - - } - } - }) - - default: - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - di := dst.PixOffset(0, dstY) - for dstX := 0; dstX < dstW; dstX++ { - - c := color.NRGBAModel.Convert(img.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA) - dst.Pix[di+0] = c.R - dst.Pix[di+1] = c.G - dst.Pix[di+2] = c.B - dst.Pix[di+3] = c.A - - di += 4 - - } - } - }) - - } - - return dst -} - -// This function used internally to convert any image type to NRGBA if needed. -func toNRGBA(img image.Image) *image.NRGBA { - srcBounds := img.Bounds() - if srcBounds.Min.X == 0 && srcBounds.Min.Y == 0 { - if src0, ok := img.(*image.NRGBA); ok { - return src0 - } - } - return Clone(img) -} diff --git a/vendor/github.com/disintegration/imaging/histogram.go b/vendor/github.com/disintegration/imaging/histogram.go deleted file mode 100755 index aef3338..0000000 --- a/vendor/github.com/disintegration/imaging/histogram.go +++ /dev/null @@ -1,43 +0,0 @@ -package imaging - -import ( - "image" -) - -// Histogram returns a normalized histogram of an image. -// -// Resulting histogram is represented as an array of 256 floats, where -// histogram[i] is a probability of a pixel being of a particular luminance i. 
-func Histogram(img image.Image) [256]float64 { - src := toNRGBA(img) - width := src.Bounds().Max.X - height := src.Bounds().Max.Y - - var histogram [256]float64 - var total float64 - - if width == 0 || height == 0 { - return histogram - } - - for y := 0; y < height; y++ { - for x := 0; x < width; x++ { - i := y*src.Stride + x*4 - - r := src.Pix[i+0] - g := src.Pix[i+1] - b := src.Pix[i+2] - - var y float32 = 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b) - - histogram[int(y+0.5)]++ - total++ - } - } - - for i := 0; i < 256; i++ { - histogram[i] = histogram[i] / total - } - - return histogram -} diff --git a/vendor/github.com/disintegration/imaging/resize.go b/vendor/github.com/disintegration/imaging/resize.go deleted file mode 100755 index b21eed5..0000000 --- a/vendor/github.com/disintegration/imaging/resize.go +++ /dev/null @@ -1,585 +0,0 @@ -package imaging - -import ( - "image" - "math" -) - -type iwpair struct { - i int - w int32 -} - -type pweights struct { - iwpairs []iwpair - wsum int32 -} - -func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) []pweights { - du := float64(srcSize) / float64(dstSize) - scale := du - if scale < 1.0 { - scale = 1.0 - } - ru := math.Ceil(scale * filter.Support) - - out := make([]pweights, dstSize) - - for v := 0; v < dstSize; v++ { - fu := (float64(v)+0.5)*du - 0.5 - - startu := int(math.Ceil(fu - ru)) - if startu < 0 { - startu = 0 - } - endu := int(math.Floor(fu + ru)) - if endu > srcSize-1 { - endu = srcSize - 1 - } - - wsum := int32(0) - for u := startu; u <= endu; u++ { - w := int32(0xff * filter.Kernel((float64(u)-fu)/scale)) - if w != 0 { - wsum += w - out[v].iwpairs = append(out[v].iwpairs, iwpair{u, w}) - } - } - out[v].wsum = wsum - } - - return out -} - -// Resize resizes the image to the specified width and height using the specified resampling -// filter and returns the transformed image. If one of width or height is 0, the image aspect -// ratio is preserved. -// -// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, -// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. 
-// -// Usage example: -// -// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos) -// -func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { - dstW, dstH := width, height - - if dstW < 0 || dstH < 0 { - return &image.NRGBA{} - } - if dstW == 0 && dstH == 0 { - return &image.NRGBA{} - } - - src := toNRGBA(img) - - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - - if srcW <= 0 || srcH <= 0 { - return &image.NRGBA{} - } - - // if new width or height is 0 then preserve aspect ratio, minimum 1px - if dstW == 0 { - tmpW := float64(dstH) * float64(srcW) / float64(srcH) - dstW = int(math.Max(1.0, math.Floor(tmpW+0.5))) - } - if dstH == 0 { - tmpH := float64(dstW) * float64(srcH) / float64(srcW) - dstH = int(math.Max(1.0, math.Floor(tmpH+0.5))) - } - - var dst *image.NRGBA - - if filter.Support <= 0.0 { - // nearest-neighbor special case - dst = resizeNearest(src, dstW, dstH) - - } else { - // two-pass resize - if srcW != dstW { - dst = resizeHorizontal(src, dstW, filter) - } else { - dst = src - } - - if srcH != dstH { - dst = resizeVertical(dst, dstH, filter) - } - } - - return dst -} - -func resizeHorizontal(src *image.NRGBA, width int, filter ResampleFilter) *image.NRGBA { - srcBounds := src.Bounds() - srcW := srcBounds.Max.X - srcH := srcBounds.Max.Y - - dstW := width - dstH := srcH - - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - weights := precomputeWeights(dstW, srcW, filter) - - parallel(dstH, func(partStart, partEnd int) { - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - var c [4]int64 - for _, iw := range weights[dstX].iwpairs { - i := dstY*src.Stride + iw.i*4 - a := int64(src.Pix[i+3]) * int64(iw.w) - c[0] += int64(src.Pix[i+0]) * a - c[1] += int64(src.Pix[i+1]) * a - c[2] += int64(src.Pix[i+2]) * a - c[3] += a - } - j := dstY*dst.Stride + dstX*4 - sum := weights[dstX].wsum - dst.Pix[j+0] = clampint32(int32(float64(c[0])/float64(c[3]) + 0.5)) - dst.Pix[j+1] = clampint32(int32(float64(c[1])/float64(c[3]) + 0.5)) - dst.Pix[j+2] = clampint32(int32(float64(c[2])/float64(c[3]) + 0.5)) - dst.Pix[j+3] = clampint32(int32(float64(c[3])/float64(sum) + 0.5)) - } - } - }) - - return dst -} - -func resizeVertical(src *image.NRGBA, height int, filter ResampleFilter) *image.NRGBA { - srcBounds := src.Bounds() - srcW := srcBounds.Max.X - srcH := srcBounds.Max.Y - - dstW := srcW - dstH := height - - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - weights := precomputeWeights(dstH, srcH, filter) - - parallel(dstW, func(partStart, partEnd int) { - - for dstX := partStart; dstX < partEnd; dstX++ { - for dstY := 0; dstY < dstH; dstY++ { - var c [4]int64 - for _, iw := range weights[dstY].iwpairs { - i := iw.i*src.Stride + dstX*4 - a := int64(src.Pix[i+3]) * int64(iw.w) - c[0] += int64(src.Pix[i+0]) * a - c[1] += int64(src.Pix[i+1]) * a - c[2] += int64(src.Pix[i+2]) * a - c[3] += a - } - j := dstY*dst.Stride + dstX*4 - sum := weights[dstY].wsum - dst.Pix[j+0] = clampint32(int32(float64(c[0])/float64(c[3]) + 0.5)) - dst.Pix[j+1] = clampint32(int32(float64(c[1])/float64(c[3]) + 0.5)) - dst.Pix[j+2] = clampint32(int32(float64(c[2])/float64(c[3]) + 0.5)) - dst.Pix[j+3] = clampint32(int32(float64(c[3])/float64(sum) + 0.5)) - } - } - - }) - - return dst -} - -// fast nearest-neighbor resize, no filtering -func resizeNearest(src *image.NRGBA, width, height int) *image.NRGBA { - dstW, dstH := width, height - - srcBounds := src.Bounds() - srcW := srcBounds.Max.X - srcH := srcBounds.Max.Y - - dst := 
image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - dx := float64(srcW) / float64(dstW) - dy := float64(srcH) / float64(dstH) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - fy := (float64(dstY)+0.5)*dy - 0.5 - - for dstX := 0; dstX < dstW; dstX++ { - fx := (float64(dstX)+0.5)*dx - 0.5 - - srcX := int(math.Min(math.Max(math.Floor(fx+0.5), 0.0), float64(srcW))) - srcY := int(math.Min(math.Max(math.Floor(fy+0.5), 0.0), float64(srcH))) - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// Fit scales down the image using the specified resample filter to fit the specified -// maximum width and height and returns the transformed image. -// -// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, -// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. -// -// Usage example: -// -// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) -// -func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { - maxW, maxH := width, height - - if maxW <= 0 || maxH <= 0 { - return &image.NRGBA{} - } - - srcBounds := img.Bounds() - srcW := srcBounds.Dx() - srcH := srcBounds.Dy() - - if srcW <= 0 || srcH <= 0 { - return &image.NRGBA{} - } - - if srcW <= maxW && srcH <= maxH { - return Clone(img) - } - - srcAspectRatio := float64(srcW) / float64(srcH) - maxAspectRatio := float64(maxW) / float64(maxH) - - var newW, newH int - if srcAspectRatio > maxAspectRatio { - newW = maxW - newH = int(float64(newW) / srcAspectRatio) - } else { - newH = maxH - newW = int(float64(newH) * srcAspectRatio) - } - - return Resize(img, newW, newH, filter) -} - -// Fill scales the image to the smallest possible size that will cover the specified dimensions, -// crops the resized image to the specified dimensions using the given anchor point and returns -// the transformed image. -// -// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, -// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. -// -// Usage example: -// -// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos) -// -func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA { - minW, minH := width, height - - if minW <= 0 || minH <= 0 { - return &image.NRGBA{} - } - - srcBounds := img.Bounds() - srcW := srcBounds.Dx() - srcH := srcBounds.Dy() - - if srcW <= 0 || srcH <= 0 { - return &image.NRGBA{} - } - - if srcW == minW && srcH == minH { - return Clone(img) - } - - srcAspectRatio := float64(srcW) / float64(srcH) - minAspectRatio := float64(minW) / float64(minH) - - var tmp *image.NRGBA - if srcAspectRatio < minAspectRatio { - tmp = Resize(img, minW, 0, filter) - } else { - tmp = Resize(img, 0, minH, filter) - } - - return CropAnchor(tmp, minW, minH, anchor) -} - -// Thumbnail scales the image up or down using the specified resample filter, crops it -// to the specified width and hight and returns the transformed image. -// -// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, -// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. 
-// -// Usage example: -// -// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos) -// -func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { - return Fill(img, width, height, Center, filter) -} - -// Resample filter struct. It can be used to make custom filters. -// -// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, -// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. -// -// General filter recommendations: -// -// - Lanczos -// Probably the best resampling filter for photographic images yielding sharp results, -// but it's slower than cubic filters (see below). -// -// - CatmullRom -// A sharp cubic filter. It's a good filter for both upscaling and downscaling if sharp results are needed. -// -// - MitchellNetravali -// A high quality cubic filter that produces smoother results with less ringing than CatmullRom. -// -// - BSpline -// A good filter if a very smooth output is needed. -// -// - Linear -// Bilinear interpolation filter, produces reasonably good, smooth output. It's faster than cubic filters. -// -// - Box -// Simple and fast resampling filter appropriate for downscaling. -// When upscaling it's similar to NearestNeighbor. -// -// - NearestNeighbor -// Fastest resample filter, no antialiasing at all. Rarely used. -// -type ResampleFilter struct { - Support float64 - Kernel func(float64) float64 -} - -// Nearest-neighbor filter, no anti-aliasing. -var NearestNeighbor ResampleFilter - -// Box filter (averaging pixels). -var Box ResampleFilter - -// Linear filter. -var Linear ResampleFilter - -// Hermite cubic spline filter (BC-spline; B=0; C=0). -var Hermite ResampleFilter - -// Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3). -var MitchellNetravali ResampleFilter - -// Catmull-Rom - sharp cubic filter (BC-spline; B=0; C=0.5). -var CatmullRom ResampleFilter - -// Cubic B-spline - smooth cubic filter (BC-spline; B=1; C=0). -var BSpline ResampleFilter - -// Gaussian Blurring Filter. -var Gaussian ResampleFilter - -// Bartlett-windowed sinc filter (3 lobes). -var Bartlett ResampleFilter - -// Lanczos filter (3 lobes). -var Lanczos ResampleFilter - -// Hann-windowed sinc filter (3 lobes). -var Hann ResampleFilter - -// Hamming-windowed sinc filter (3 lobes). -var Hamming ResampleFilter - -// Blackman-windowed sinc filter (3 lobes). -var Blackman ResampleFilter - -// Welch-windowed sinc filter (parabolic window, 3 lobes). -var Welch ResampleFilter - -// Cosine-windowed sinc filter (3 lobes). 
-var Cosine ResampleFilter - -func bcspline(x, b, c float64) float64 { - x = math.Abs(x) - if x < 1.0 { - return ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6 - } - if x < 2.0 { - return ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6 - } - return 0 -} - -func sinc(x float64) float64 { - if x == 0 { - return 1 - } - return math.Sin(math.Pi*x) / (math.Pi * x) -} - -func init() { - NearestNeighbor = ResampleFilter{ - Support: 0.0, // special case - not applying the filter - } - - Box = ResampleFilter{ - Support: 0.5, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x <= 0.5 { - return 1.0 - } - return 0 - }, - } - - Linear = ResampleFilter{ - Support: 1.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 1.0 { - return 1.0 - x - } - return 0 - }, - } - - Hermite = ResampleFilter{ - Support: 1.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 1.0 { - return bcspline(x, 0.0, 0.0) - } - return 0 - }, - } - - MitchellNetravali = ResampleFilter{ - Support: 2.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 2.0 { - return bcspline(x, 1.0/3.0, 1.0/3.0) - } - return 0 - }, - } - - CatmullRom = ResampleFilter{ - Support: 2.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 2.0 { - return bcspline(x, 0.0, 0.5) - } - return 0 - }, - } - - BSpline = ResampleFilter{ - Support: 2.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 2.0 { - return bcspline(x, 1.0, 0.0) - } - return 0 - }, - } - - Gaussian = ResampleFilter{ - Support: 2.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 2.0 { - return math.Exp(-2 * x * x) - } - return 0 - }, - } - - Bartlett = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * (3.0 - x) / 3.0 - } - return 0 - }, - } - - Lanczos = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * sinc(x/3.0) - } - return 0 - }, - } - - Hann = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0)) - } - return 0 - }, - } - - Hamming = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0)) - } - return 0 - }, - } - - Blackman = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0)) - } - return 0 - }, - } - - Welch = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * (1.0 - (x * x / 9.0)) - } - return 0 - }, - } - - Cosine = ResampleFilter{ - Support: 3.0, - Kernel: func(x float64) float64 { - x = math.Abs(x) - if x < 3.0 { - return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0)) - } - return 0 - }, - } -} diff --git a/vendor/github.com/disintegration/imaging/tools.go b/vendor/github.com/disintegration/imaging/tools.go deleted file mode 100755 index 76f1444..0000000 --- a/vendor/github.com/disintegration/imaging/tools.go +++ /dev/null @@ -1,201 +0,0 @@ -package imaging - -import ( - "image" - "math" -) - -// Anchor is the anchor point for image alignment. 
-type Anchor int - -const ( - Center Anchor = iota - TopLeft - Top - TopRight - Left - Right - BottomLeft - Bottom - BottomRight -) - -func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point { - var x, y int - switch anchor { - case TopLeft: - x = b.Min.X - y = b.Min.Y - case Top: - x = b.Min.X + (b.Dx()-w)/2 - y = b.Min.Y - case TopRight: - x = b.Max.X - w - y = b.Min.Y - case Left: - x = b.Min.X - y = b.Min.Y + (b.Dy()-h)/2 - case Right: - x = b.Max.X - w - y = b.Min.Y + (b.Dy()-h)/2 - case BottomLeft: - x = b.Min.X - y = b.Max.Y - h - case Bottom: - x = b.Min.X + (b.Dx()-w)/2 - y = b.Max.Y - h - case BottomRight: - x = b.Max.X - w - y = b.Max.Y - h - default: - x = b.Min.X + (b.Dx()-w)/2 - y = b.Min.Y + (b.Dy()-h)/2 - } - return image.Pt(x, y) -} - -// Crop cuts out a rectangular region with the specified bounds -// from the image and returns the cropped image. -func Crop(img image.Image, rect image.Rectangle) *image.NRGBA { - src := toNRGBA(img) - srcRect := rect.Sub(img.Bounds().Min) - sub := src.SubImage(srcRect) - return Clone(sub) // New image Bounds().Min point will be (0, 0) -} - -// CropAnchor cuts out a rectangular region with the specified size -// from the image using the specified anchor point and returns the cropped image. -func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA { - srcBounds := img.Bounds() - pt := anchorPt(srcBounds, width, height, anchor) - r := image.Rect(0, 0, width, height).Add(pt) - b := srcBounds.Intersect(r) - return Crop(img, b) -} - -// CropCenter cuts out a rectangular region with the specified size -// from the center of the image and returns the cropped image. -func CropCenter(img image.Image, width, height int) *image.NRGBA { - return CropAnchor(img, width, height, Center) -} - -// Paste pastes the img image to the background image at the specified position and returns the combined image. -func Paste(background, img image.Image, pos image.Point) *image.NRGBA { - src := toNRGBA(img) - dst := Clone(background) // cloned image bounds start at (0, 0) - startPt := pos.Sub(background.Bounds().Min) // so we should translate start point - endPt := startPt.Add(src.Bounds().Size()) - pasteBounds := image.Rectangle{startPt, endPt} - - if dst.Bounds().Overlaps(pasteBounds) { - intersectBounds := dst.Bounds().Intersect(pasteBounds) - - rowSize := intersectBounds.Dx() * 4 - numRows := intersectBounds.Dy() - - srcStartX := intersectBounds.Min.X - pasteBounds.Min.X - srcStartY := intersectBounds.Min.Y - pasteBounds.Min.Y - - i0 := dst.PixOffset(intersectBounds.Min.X, intersectBounds.Min.Y) - j0 := src.PixOffset(srcStartX, srcStartY) - - di := dst.Stride - dj := src.Stride - - for row := 0; row < numRows; row++ { - copy(dst.Pix[i0:i0+rowSize], src.Pix[j0:j0+rowSize]) - i0 += di - j0 += dj - } - } - - return dst -} - -// PasteCenter pastes the img image to the center of the background image and returns the combined image. -func PasteCenter(background, img image.Image) *image.NRGBA { - bgBounds := background.Bounds() - bgW := bgBounds.Dx() - bgH := bgBounds.Dy() - bgMinX := bgBounds.Min.X - bgMinY := bgBounds.Min.Y - - centerX := bgMinX + bgW/2 - centerY := bgMinY + bgH/2 - - x0 := centerX - img.Bounds().Dx()/2 - y0 := centerY - img.Bounds().Dy()/2 - - return Paste(background, img, image.Pt(x0, y0)) -} - -// Overlay draws the img image over the background image at given position -// and returns the combined image. 
Opacity parameter is the opacity of the img -// image layer, used to compose the images, it must be from 0.0 to 1.0. -// -// Usage examples: -// -// // draw the sprite over the background at position (50, 50) -// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0) -// -// // blend two opaque images of the same size -// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5) -// -func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA { - opacity = math.Min(math.Max(opacity, 0.0), 1.0) // check: 0.0 <= opacity <= 1.0 - - src := toNRGBA(img) - dst := Clone(background) // cloned image bounds start at (0, 0) - startPt := pos.Sub(background.Bounds().Min) // so we should translate start point - endPt := startPt.Add(src.Bounds().Size()) - pasteBounds := image.Rectangle{startPt, endPt} - - if dst.Bounds().Overlaps(pasteBounds) { - intersectBounds := dst.Bounds().Intersect(pasteBounds) - - for y := intersectBounds.Min.Y; y < intersectBounds.Max.Y; y++ { - for x := intersectBounds.Min.X; x < intersectBounds.Max.X; x++ { - i := y*dst.Stride + x*4 - - srcX := x - pasteBounds.Min.X - srcY := y - pasteBounds.Min.Y - j := srcY*src.Stride + srcX*4 - - a1 := float64(dst.Pix[i+3]) - a2 := float64(src.Pix[j+3]) - - coef2 := opacity * a2 / 255.0 - coef1 := (1 - coef2) * a1 / 255.0 - coefSum := coef1 + coef2 - coef1 /= coefSum - coef2 /= coefSum - - dst.Pix[i+0] = uint8(float64(dst.Pix[i+0])*coef1 + float64(src.Pix[j+0])*coef2) - dst.Pix[i+1] = uint8(float64(dst.Pix[i+1])*coef1 + float64(src.Pix[j+1])*coef2) - dst.Pix[i+2] = uint8(float64(dst.Pix[i+2])*coef1 + float64(src.Pix[j+2])*coef2) - dst.Pix[i+3] = uint8(math.Min(a1+a2*opacity*(255.0-a1)/255.0, 255.0)) - } - } - } - - return dst -} - -// OverlayCenter overlays the img image to the center of the background image and -// returns the combined image. Opacity parameter is the opacity of the img -// image layer, used to compose the images, it must be from 0.0 to 1.0. -func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA { - bgBounds := background.Bounds() - bgW := bgBounds.Dx() - bgH := bgBounds.Dy() - bgMinX := bgBounds.Min.X - bgMinY := bgBounds.Min.Y - - centerX := bgMinX + bgW/2 - centerY := bgMinY + bgH/2 - - x0 := centerX - img.Bounds().Dx()/2 - y0 := centerY - img.Bounds().Dy()/2 - - return Overlay(background, img, image.Point{x0, y0}, opacity) -} diff --git a/vendor/github.com/disintegration/imaging/transform.go b/vendor/github.com/disintegration/imaging/transform.go deleted file mode 100755 index a11601b..0000000 --- a/vendor/github.com/disintegration/imaging/transform.go +++ /dev/null @@ -1,201 +0,0 @@ -package imaging - -import ( - "image" -) - -// Rotate90 rotates the image 90 degrees counterclockwise and returns the transformed image. -func Rotate90(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcH - dstH := srcW - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstH - dstY - 1 - srcY := dstX - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// Rotate180 rotates the image 180 degrees counterclockwise and returns the transformed image. 
-func Rotate180(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcW - dstH := srcH - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstW - dstX - 1 - srcY := dstH - dstY - 1 - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// Rotate270 rotates the image 270 degrees counterclockwise and returns the transformed image. -func Rotate270(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcH - dstH := srcW - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstY - srcY := dstW - dstX - 1 - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// FlipH flips the image horizontally (from left to right) and returns the transformed image. -func FlipH(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcW - dstH := srcH - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstW - dstX - 1 - srcY := dstY - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// FlipV flips the image vertically (from top to bottom) and returns the transformed image. -func FlipV(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcW - dstH := srcH - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstX - srcY := dstH - dstY - 1 - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise. -func Transpose(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcH - dstH := srcW - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstY - srcY := dstX - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} - -// Transverse flips the image vertically and rotates 90 degrees counter-clockwise. 
-func Transverse(img image.Image) *image.NRGBA { - src := toNRGBA(img) - srcW := src.Bounds().Max.X - srcH := src.Bounds().Max.Y - dstW := srcH - dstH := srcW - dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) - - parallel(dstH, func(partStart, partEnd int) { - - for dstY := partStart; dstY < partEnd; dstY++ { - for dstX := 0; dstX < dstW; dstX++ { - srcX := dstH - dstY - 1 - srcY := dstW - dstX - 1 - - srcOff := srcY*src.Stride + srcX*4 - dstOff := dstY*dst.Stride + dstX*4 - - copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) - } - } - - }) - - return dst -} diff --git a/vendor/github.com/disintegration/imaging/utils.go b/vendor/github.com/disintegration/imaging/utils.go deleted file mode 100755 index 8b1ab8a..0000000 --- a/vendor/github.com/disintegration/imaging/utils.go +++ /dev/null @@ -1,77 +0,0 @@ -package imaging - -import ( - "math" - "runtime" - "sync" - "sync/atomic" -) - -var parallelizationEnabled = true - -// if GOMAXPROCS = 1: no goroutines used -// if GOMAXPROCS > 1: spawn N=GOMAXPROCS workers in separate goroutines -func parallel(dataSize int, fn func(partStart, partEnd int)) { - numGoroutines := 1 - partSize := dataSize - - if parallelizationEnabled { - numProcs := runtime.GOMAXPROCS(0) - if numProcs > 1 { - numGoroutines = numProcs - partSize = dataSize / (numGoroutines * 10) - if partSize < 1 { - partSize = 1 - } - } - } - - if numGoroutines == 1 { - fn(0, dataSize) - } else { - var wg sync.WaitGroup - wg.Add(numGoroutines) - idx := uint64(0) - - for p := 0; p < numGoroutines; p++ { - go func() { - defer wg.Done() - for { - partStart := int(atomic.AddUint64(&idx, uint64(partSize))) - partSize - if partStart >= dataSize { - break - } - partEnd := partStart + partSize - if partEnd > dataSize { - partEnd = dataSize - } - fn(partStart, partEnd) - } - }() - } - - wg.Wait() - } -} - -func absint(i int) int { - if i < 0 { - return -i - } - return i -} - -// clamp & round float64 to uint8 (0..255) -func clamp(v float64) uint8 { - return uint8(math.Min(math.Max(v, 0.0), 255.0) + 0.5) -} - -// clamp int32 to uint8 (0..255) -func clampint32(v int32) uint8 { - if v < 0 { - return 0 - } else if v > 255 { - return 255 - } - return uint8(v) -} diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS deleted file mode 100755 index 3774919..0000000 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# This is the official list of Go-MySQL-Driver authors for copyright purposes. - -# If you are submitting a patch, please add your name or the name of the -# organization which holds the copyright to this list in alphabetical order. - -# Names should be added to this file as -# Name -# The email address is not required for organizations. -# Please keep the list sorted. - - -# Individual Persons - -Aaron Hopkins -Arne Hormann -Carlos Nieto -Chris Moos -Daniel Nichter -Daniël van Eeden -DisposaBoy -Frederick Mayle -Gustavo Kristic -Hanno Braun -Henri Yandell -Hirotaka Yamamoto -INADA Naoki -James Harr -Jian Zhen -Joshua Prunier -Julien Lefevre -Julien Schmidt -Kamil Dziedzic -Kevin Malachowski -Leonardo YongUk Kim -Luca Looz -Lucas Liu -Luke Scott -Michael Woolnough -Nicola Peduzzi -Paul Bonser -Runrioter Wung -Soroush Pour -Stan Putrya -Stanley Gunawan -Xiaobing Jiang -Xiuming Chen - -# Organizations - -Barracuda Networks, Inc. -Google Inc. -Stripe Inc. 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md deleted file mode 100755 index 381d918..0000000 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ /dev/null @@ -1,103 +0,0 @@ -## HEAD - -Changes: - - - Go 1.1 is no longer supported - - Use decimals field from MySQL to format time types (#249) - - Buffer optimizations (#269) - - TLS ServerName defaults to the host (#283) - -Bugfixes: - - - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - - Fixed handling of queries without columns and rows (#255) - - Fixed a panic when SetKeepAlive() failed (#298) - - Support receiving ERR packet while reading rows (#321) - - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) - - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) - - Actually zero out bytes in handshake response (#378) - - Fixed race condition in registering LOAD DATA INFILE handler (#383) - - Fixed tests with MySQL 5.7.9+ (#380) - - QueryUnescape TLS config names (#397) - - Fixed "broken pipe" error by writing to closed socket (#390) - -New Features: - - Support for returning table alias on Columns() (#289, #359, #382) - - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318) - - Support for uint64 parameters with high bit set (#332, #345) - - Cleartext authentication plugin support (#327) - - - -## Version 1.2 (2014-06-03) - -Changes: - - - We switched back to a "rolling release". `go get` installs the current master branch again - - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver - - Exported errors to allow easy checking from application code - - Enabled TCP Keepalives on TCP connections - - Optimized INFILE handling (better buffer size calculation, lazy init, ...) - - The DSN parser also checks for a missing separating slash - - Faster binary date / datetime to string formatting - - Also exported the MySQLWarning type - - mysqlConn.Close returns the first error encountered instead of ignoring all errors - - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets - -New Features: - - - `RegisterDial` allows the usage of a custom dial function to establish the network connection - - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter - - Logging of critical errors is configurable with `SetLogger` - - Google CloudSQL support - -Bugfixes: - - - Allow more than 32 parameters in prepared statements - - Various old_password fixes - - Fixed TestConcurrent test to pass Go's race detection - - Fixed appendLengthEncodedInteger for large numbers - - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) - - -## Version 1.1 (2013-11-02) - -Changes: - - - Go-MySQL-Driver now requires Go 1.1 - - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore - - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors - - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` - - DSN parameter values must now be url.QueryEscape'ed. 
This allows text values to contain special characters, such as '&'. - - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries - - Optimized the buffer for reading - - stmt.Query now caches column metadata - - New Logo - - Changed the copyright header to include all contributors - - Improved the LOAD INFILE documentation - - The driver struct is now exported to make the driver directly accessible - - Refactored the driver tests - - Added more benchmarks and moved all to a separate file - - Other small refactoring - -New Features: - - - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure - - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs - - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used - -Bugfixes: - - - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - - Convert to DB timezone when inserting `time.Time` - - Splitted packets (more than 16MB) are now merged correctly - - Fixed false positive `io.EOF` errors when the data was fully read - - Avoid panics on reuse of closed connections - - Fixed empty string producing false nil values - - Fixed sign byte for positive TIME fields - - -## Version 1.0 (2013-05-14) - -Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md deleted file mode 100755 index 8fe16bc..0000000 --- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). - -## Contributing Code - -By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. -Don't forget to add yourself to the AUTHORS file. - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". - -## Development Ideas - -If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md deleted file mode 100755 index d9771f1..0000000 --- a/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,21 +0,0 @@ -### Issue description -Tell us what should happen and what happens instead - -### Example code -```go -If possible, please enter some example code here to reproduce the issue. -``` - -### Error log -``` -If you have an error log, please paste it here. -``` - -### Configuration -*Driver version (or git SHA):* - -*Go version:* run `go version` in your console - -*Server version:* E.g. 
MySQL 5.6, MariaDB 10.0.20 - -*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10 diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE deleted file mode 100755 index 14e2f77..0000000 --- a/vendor/github.com/go-sql-driver/mysql/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. 
Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md deleted file mode 100755 index 6f5c7eb..0000000 --- a/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,9 +0,0 @@ -### Description -Please explain the changes you made here. 
- -### Checklist -- [ ] Code compiles correctly -- [ ] Created tests which fail without the change (if possible) -- [ ] All tests passing -- [ ] Extended the README / documentation, if necessary -- [ ] Added myself / the copyright holder to the AUTHORS file diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md deleted file mode 100755 index f23e2c0..0000000 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ /dev/null @@ -1,420 +0,0 @@ -# Go-MySQL-Driver - -A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package - -![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") - -**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases) - -[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql) - ---------------------------------------- - * [Features](#features) - * [Requirements](#requirements) - * [Installation](#installation) - * [Usage](#usage) - * [DSN (Data Source Name)](#dsn-data-source-name) - * [Password](#password) - * [Protocol](#protocol) - * [Address](#address) - * [Parameters](#parameters) - * [Examples](#examples) - * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) - * [time.Time support](#timetime-support) - * [Unicode support](#unicode-support) - * [Testing / Development](#testing--development) - * [License](#license) - ---------------------------------------- - -## Features - * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") - * Native Go implementation. No C-bindings, just pure Go - * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc) - * Automatic handling of broken connections - * Automatic Connection Pooling *(by database/sql package)* - * Supports queries larger than 16MB - * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. - * Intelligent `LONG DATA` handling in prepared statements - * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support - * Optional `time.Time` parsing - * Optional placeholder interpolation - -## Requirements - * Go 1.2 or higher - * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) - ---------------------------------------- - -## Installation -Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: -```bash -$ go get github.com/go-sql-driver/mysql -``` -Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. - -## Usage -_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. - -Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: -```go -import "database/sql" -import _ DriverStatus - -db, err := sql.Open("mysql", "user:password@/dbname") -``` - -[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). - - -### DSN (Data Source Name) - -The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): -``` -[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] -``` - -A DSN in its fullest form: -``` -username:password@protocol(address)/dbname?param=value -``` - -Except for the databasename, all values are optional. So the minimal DSN is: -``` -/dbname -``` - -If you do not want to preselect a database, leave `dbname` empty: -``` -/ -``` -This has the same effect as an empty DSN string: -``` - -``` - -Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. - -#### Password -Passwords can consist of any character. Escaping is **not** necessary. - -#### Protocol -See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available. -In general you should use an Unix domain socket if available and TCP otherwise for best performance. - -#### Address -For TCP and UDP networks, addresses have the form `host:port`. -If `host` is a literal IPv6 address, it must be enclosed in square brackets. -The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. - -For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. - -#### Parameters -*Parameters are case-sensitive!* - -Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. - -##### `allowAllFiles` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. -[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) - -##### `allowCleartextPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. - -##### `allowOldPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` -`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). - -##### `charset` - -``` -Type: string -Valid Values: -Default: none -``` - -Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. 
This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). - -Usage of the `charset` parameter is discouraged because it issues additional queries to the server. -Unless you need the fallback behavior, please use `collation` instead. - -##### `collation` - -``` -Type: string -Valid Values: -Default: utf8_general_ci -``` - -Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. - -A list of valid charsets for a server is retrievable with `SHOW COLLATION`. - -##### `clientFoundRows` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. - -##### `columnsWithAlias` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: - -``` -SELECT u.id FROM users as u -``` - -will return `u.id` instead of just `id` if `columnsWithAlias=true`. - -##### `interpolateParams` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. - -*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* - -##### `loc` - -``` -Type: string -Valid Values: -Default: UTC -``` - -Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. - -Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. - -Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. - -##### `multiStatements` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. - -When `multiStatements` is used, `?` parameters must only be used in the first statement. - - -##### `parseTime` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` - - -##### `readTimeout` - -``` -Type: decimal number -Default: 0 -``` - -I/O read timeout. 
The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. - - -##### `strict` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`strict=true` enables the strict mode in which MySQL warnings are treated as errors. - -By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example. - - -##### `timeout` - -``` -Type: decimal number -Default: OS default -``` - -*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). - - -##### `tls` - -``` -Type: bool / string -Valid Values: true, false, skip-verify, -Default: false -``` - -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). - - -##### `writeTimeout` - -``` -Type: decimal number -Default: 0 -``` - -I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. - - -##### System Variables - -All other parameters are interpreted as system variables: - * `autocommit`: `"SET autocommit="` - * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone="` - * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation="` - * `param`: `"SET ="` - -*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!* - -#### Examples -``` -user@unix(/path/to/socket)/dbname -``` - -``` -root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local -``` - -``` -user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true -``` - -Use the [strict mode](#strict) but ignore notes: -``` -user:password@/dbname?strict=true&sql_notes=false -``` - -TCP via IPv6: -``` -user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci -``` - -TCP on a remote host, e.g. Amazon RDS: -``` -id:password@tcp(your-amazonaws-uri.com:3306)/dbname -``` - -Google Cloud SQL on App Engine: -``` -user@cloudsql(project-id:instance-name)/dbname -``` - -TCP using default port (3306) on localhost: -``` -user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped -``` - -Use the default protocol (tcp) and host (localhost:3306): -``` -user:password@/dbname -``` - -No Database preselected: -``` -user:password@/ -``` - -### `LOAD DATA LOCAL INFILE` support -For this feature you need direct access to the package. Therefore you must change the import path (no `_`): -```go -import "github.com/go-sql-driver/mysql" -``` - -Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). 
- -To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. - -See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. - - -### `time.Time` support -The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. - -However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. - -**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). - -Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. - - -### Unicode support -Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. - -Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. - -Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. - -See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. - - -## Testing / Development -To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. - -Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. -If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). - -See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. - ---------------------------------------- - -## License -Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -Mozilla summarizes the license scope as follows: -> MPL: The copyleft applies to any files containing MPLed code. - - -That means: - * You can **use** the **unchanged** source code both in private and commercially - * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. 
GPL 3.0 or Apache License 2.0) - * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** - -Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. - -You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") - diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go deleted file mode 100755 index 565614e..0000000 --- a/vendor/github.com/go-sql-driver/mysql/appengine.go +++ /dev/null @@ -1,19 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build appengine - -package mysql - -import ( - "appengine/cloudsql" -) - -func init() { - RegisterDial("cloudsql", cloudsql.Dial) -} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go deleted file mode 100755 index 2001fea..0000000 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ /dev/null @@ -1,147 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "io" - "net" - "time" -) - -const defaultBufSize = 4096 - -// A buffer which is used for both reading and writing. -// This is possible since communication on each connection is synchronous. -// In other words, we can't write and read simultaneously on the same connection. -// The buffer is similar to bufio.Reader / Writer but zero-copy-ish -// Also highly optimized for this particular use case. -type buffer struct { - buf []byte - nc net.Conn - idx int - length int - timeout time.Duration -} - -func newBuffer(nc net.Conn) buffer { - var b [defaultBufSize]byte - return buffer{ - buf: b[:], - nc: nc, - } -} - -// fill reads into the buffer until at least _need_ bytes are in it -func (b *buffer) fill(need int) error { - n := b.length - - // move existing data to the beginning - if n > 0 && b.idx > 0 { - copy(b.buf[0:n], b.buf[b.idx:]) - } - - // grow buffer if necessary - // TODO: let the buffer shrink again at some point - // Maybe keep the org buf slice and swap back? - if need > len(b.buf) { - // Round up to the next multiple of the default size - newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) - copy(newBuf, b.buf) - b.buf = newBuf - } - - b.idx = 0 - - for { - if b.timeout > 0 { - if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { - return err - } - } - - nn, err := b.nc.Read(b.buf[n:]) - n += nn - - switch err { - case nil: - if n < need { - continue - } - b.length = n - return nil - - case io.EOF: - if n >= need { - b.length = n - return nil - } - return io.ErrUnexpectedEOF - - default: - return err - } - } -} - -// returns next N bytes from buffer. 
-// The returned slice is only guaranteed to be valid until the next read -func (b *buffer) readNext(need int) ([]byte, error) { - if b.length < need { - // refill - if err := b.fill(need); err != nil { - return nil, err - } - } - - offset := b.idx - b.idx += need - b.length -= need - return b.buf[offset:b.idx], nil -} - -// returns a buffer with the requested size. -// If possible, a slice from the existing buffer is returned. -// Otherwise a bigger buffer is made. -// Only one buffer (total) can be used at a time. -func (b *buffer) takeBuffer(length int) []byte { - if b.length > 0 { - return nil - } - - // test (cheap) general case first - if length <= defaultBufSize || length <= cap(b.buf) { - return b.buf[:length] - } - - if length < maxPacketSize { - b.buf = make([]byte, length) - return b.buf - } - return make([]byte, length) -} - -// shortcut which can be used if the requested buffer is guaranteed to be -// smaller than defaultBufSize -// Only one buffer (total) can be used at a time. -func (b *buffer) takeSmallBuffer(length int) []byte { - if b.length == 0 { - return b.buf[:length] - } - return nil -} - -// takeCompleteBuffer returns the complete existing buffer. -// This can be used if the necessary buffer size is unknown. -// Only one buffer (total) can be used at a time. -func (b *buffer) takeCompleteBuffer() []byte { - if b.length == 0 { - return b.buf - } - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go deleted file mode 100755 index 82079cf..0000000 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ /dev/null @@ -1,250 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const defaultCollation = "utf8_general_ci" - -// A list of available collations mapped to the internal ID. 
-// To update this map use the following MySQL query: -// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS -var collations = map[string]byte{ - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - "ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - "utf16_general_ci": 54, - "utf16_bin": 55, - "utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - "utf32_general_ci": 60, - "utf32_bin": 61, - "utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - "euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - "ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - "utf16_unicode_ci": 101, - "utf16_icelandic_ci": 102, - "utf16_latvian_ci": 103, - "utf16_romanian_ci": 104, - "utf16_slovenian_ci": 105, - "utf16_polish_ci": 106, - "utf16_estonian_ci": 107, - "utf16_spanish_ci": 108, - "utf16_swedish_ci": 109, - "utf16_turkish_ci": 110, - "utf16_czech_ci": 111, - "utf16_danish_ci": 112, - "utf16_lithuanian_ci": 113, - "utf16_slovak_ci": 114, - "utf16_spanish2_ci": 115, - "utf16_roman_ci": 116, - "utf16_persian_ci": 117, - "utf16_esperanto_ci": 118, - "utf16_hungarian_ci": 119, - "utf16_sinhala_ci": 120, - "utf16_german2_ci": 121, - "utf16_croatian_ci": 122, - "utf16_unicode_520_ci": 123, - "utf16_vietnamese_ci": 124, - "ucs2_unicode_ci": 128, - "ucs2_icelandic_ci": 129, - "ucs2_latvian_ci": 130, - "ucs2_romanian_ci": 131, - "ucs2_slovenian_ci": 132, - "ucs2_polish_ci": 133, - "ucs2_estonian_ci": 134, - "ucs2_spanish_ci": 135, - "ucs2_swedish_ci": 136, - "ucs2_turkish_ci": 137, - "ucs2_czech_ci": 138, - "ucs2_danish_ci": 139, - "ucs2_lithuanian_ci": 140, - "ucs2_slovak_ci": 141, - "ucs2_spanish2_ci": 142, - "ucs2_roman_ci": 143, - "ucs2_persian_ci": 144, - 
"ucs2_esperanto_ci": 145, - "ucs2_hungarian_ci": 146, - "ucs2_sinhala_ci": 147, - "ucs2_german2_ci": 148, - "ucs2_croatian_ci": 149, - "ucs2_unicode_520_ci": 150, - "ucs2_vietnamese_ci": 151, - "ucs2_general_mysql500_ci": 159, - "utf32_unicode_ci": 160, - "utf32_icelandic_ci": 161, - "utf32_latvian_ci": 162, - "utf32_romanian_ci": 163, - "utf32_slovenian_ci": 164, - "utf32_polish_ci": 165, - "utf32_estonian_ci": 166, - "utf32_spanish_ci": 167, - "utf32_swedish_ci": 168, - "utf32_turkish_ci": 169, - "utf32_czech_ci": 170, - "utf32_danish_ci": 171, - "utf32_lithuanian_ci": 172, - "utf32_slovak_ci": 173, - "utf32_spanish2_ci": 174, - "utf32_roman_ci": 175, - "utf32_persian_ci": 176, - "utf32_esperanto_ci": 177, - "utf32_hungarian_ci": 178, - "utf32_sinhala_ci": 179, - "utf32_german2_ci": 180, - "utf32_croatian_ci": 181, - "utf32_unicode_520_ci": 182, - "utf32_vietnamese_ci": 183, - "utf8_unicode_ci": 192, - "utf8_icelandic_ci": 193, - "utf8_latvian_ci": 194, - "utf8_romanian_ci": 195, - "utf8_slovenian_ci": 196, - "utf8_polish_ci": 197, - "utf8_estonian_ci": 198, - "utf8_spanish_ci": 199, - "utf8_swedish_ci": 200, - "utf8_turkish_ci": 201, - "utf8_czech_ci": 202, - "utf8_danish_ci": 203, - "utf8_lithuanian_ci": 204, - "utf8_slovak_ci": 205, - "utf8_spanish2_ci": 206, - "utf8_roman_ci": 207, - "utf8_persian_ci": 208, - "utf8_esperanto_ci": 209, - "utf8_hungarian_ci": 210, - "utf8_sinhala_ci": 211, - "utf8_german2_ci": 212, - "utf8_croatian_ci": 213, - "utf8_unicode_520_ci": 214, - "utf8_vietnamese_ci": 215, - "utf8_general_mysql500_ci": 223, - "utf8mb4_unicode_ci": 224, - "utf8mb4_icelandic_ci": 225, - "utf8mb4_latvian_ci": 226, - "utf8mb4_romanian_ci": 227, - "utf8mb4_slovenian_ci": 228, - "utf8mb4_polish_ci": 229, - "utf8mb4_estonian_ci": 230, - "utf8mb4_spanish_ci": 231, - "utf8mb4_swedish_ci": 232, - "utf8mb4_turkish_ci": 233, - "utf8mb4_czech_ci": 234, - "utf8mb4_danish_ci": 235, - "utf8mb4_lithuanian_ci": 236, - "utf8mb4_slovak_ci": 237, - "utf8mb4_spanish2_ci": 238, - "utf8mb4_roman_ci": 239, - "utf8mb4_persian_ci": 240, - "utf8mb4_esperanto_ci": 241, - "utf8mb4_hungarian_ci": 242, - "utf8mb4_sinhala_ci": 243, - "utf8mb4_german2_ci": 244, - "utf8mb4_croatian_ci": 245, - "utf8mb4_unicode_520_ci": 246, - "utf8mb4_vietnamese_ci": 247, -} - -// A blacklist of collations which is unsafe to interpolate parameters. -// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. -var unsafeCollations = map[string]bool{ - "big5_chinese_ci": true, - "sjis_japanese_ci": true, - "gbk_chinese_ci": true, - "big5_bin": true, - "gb2312_bin": true, - "gbk_bin": true, - "sjis_bin": true, - "cp932_japanese_ci": true, - "cp932_bin": true, -} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go deleted file mode 100755 index c3899de..0000000 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ /dev/null @@ -1,372 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql/driver" - "net" - "strconv" - "strings" - "time" -) - -type mysqlConn struct { - buf buffer - netConn net.Conn - affectedRows uint64 - insertId uint64 - cfg *Config - maxPacketAllowed int - maxWriteSize int - writeTimeout time.Duration - flags clientFlag - status statusFlag - sequence uint8 - parseTime bool - strict bool -} - -// Handles parameters set in DSN after the connection is established -func (mc *mysqlConn) handleParams() (err error) { - for param, val := range mc.cfg.Params { - switch param { - // Charset - case "charset": - charsets := strings.Split(val, ",") - for i := range charsets { - // ignore errors here - a charset may not exist - err = mc.exec("SET NAMES " + charsets[i]) - if err == nil { - break - } - } - if err != nil { - return - } - - // System Vars - default: - err = mc.exec("SET " + param + "=" + val + "") - if err != nil { - return - } - } - } - - return -} - -func (mc *mysqlConn) Begin() (driver.Tx, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - err := mc.exec("START TRANSACTION") - if err == nil { - return &mysqlTx{mc}, err - } - - return nil, err -} - -func (mc *mysqlConn) Close() (err error) { - // Makes Close idempotent - if mc.netConn != nil { - err = mc.writeCommandPacket(comQuit) - } - - mc.cleanup() - - return -} - -// Closes the network connection and unsets internal variables. Do not call this -// function after successfully authentication, call Close instead. This function -// is called before auth or on auth failure because MySQL will have already -// closed the network connection. -func (mc *mysqlConn) cleanup() { - // Makes cleanup idempotent - if mc.netConn != nil { - if err := mc.netConn.Close(); err != nil { - errLog.Print(err) - } - mc.netConn = nil - } - mc.cfg = nil - mc.buf.nc = nil -} - -func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := mc.writeCommandPacketStr(comStmtPrepare, query) - if err != nil { - return nil, err - } - - stmt := &mysqlStmt{ - mc: mc, - } - - // Read Result - columnCount, err := stmt.readPrepareResultPacket() - if err == nil { - if stmt.paramCount > 0 { - if err = mc.readUntilEOF(); err != nil { - return nil, err - } - } - - if columnCount > 0 { - err = mc.readUntilEOF() - } - } - - return stmt, err -} - -func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { - buf := mc.buf.takeCompleteBuffer() - if buf == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return "", driver.ErrBadConn - } - buf = buf[:0] - argPos := 0 - - for i := 0; i < len(query); i++ { - q := strings.IndexByte(query[i:], '?') - if q == -1 { - buf = append(buf, query[i:]...) - break - } - buf = append(buf, query[i:i+q]...) - i += q - - arg := args[argPos] - argPos++ - - if arg == nil { - buf = append(buf, "NULL"...) - continue - } - - switch v := arg.(type) { - case int64: - buf = strconv.AppendInt(buf, v, 10) - case float64: - buf = strconv.AppendFloat(buf, v, 'g', -1, 64) - case bool: - if v { - buf = append(buf, '1') - } else { - buf = append(buf, '0') - } - case time.Time: - if v.IsZero() { - buf = append(buf, "'0000-00-00'"...) 
- } else { - v := v.In(mc.cfg.Loc) - v = v.Add(time.Nanosecond * 500) // To round under microsecond - year := v.Year() - year100 := year / 100 - year1 := year % 100 - month := v.Month() - day := v.Day() - hour := v.Hour() - minute := v.Minute() - second := v.Second() - micro := v.Nanosecond() / 1000 - - buf = append(buf, []byte{ - '\'', - digits10[year100], digits01[year100], - digits10[year1], digits01[year1], - '-', - digits10[month], digits01[month], - '-', - digits10[day], digits01[day], - ' ', - digits10[hour], digits01[hour], - ':', - digits10[minute], digits01[minute], - ':', - digits10[second], digits01[second], - }...) - - if micro != 0 { - micro10000 := micro / 10000 - micro100 := micro / 100 % 100 - micro1 := micro % 100 - buf = append(buf, []byte{ - '.', - digits10[micro10000], digits01[micro10000], - digits10[micro100], digits01[micro100], - digits10[micro1], digits01[micro1], - }...) - } - buf = append(buf, '\'') - } - case []byte: - if v == nil { - buf = append(buf, "NULL"...) - } else { - buf = append(buf, "_binary'"...) - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeBytesBackslash(buf, v) - } else { - buf = escapeBytesQuotes(buf, v) - } - buf = append(buf, '\'') - } - case string: - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeStringBackslash(buf, v) - } else { - buf = escapeStringQuotes(buf, v) - } - buf = append(buf, '\'') - default: - return "", driver.ErrSkip - } - - if len(buf)+4 > mc.maxPacketAllowed { - return "", driver.ErrSkip - } - } - if argPos != len(args) { - return "", driver.ErrSkip - } - return string(buf), nil -} - -func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.InterpolateParams { - return nil, driver.ErrSkip - } - // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - mc.affectedRows = 0 - mc.insertId = 0 - - err := mc.exec(query) - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, err - } - return nil, err -} - -// Internal function to execute commands -func (mc *mysqlConn) exec(query string) error { - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err != nil { - return err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil && resLen > 0 { - if err = mc.readUntilEOF(); err != nil { - return err - } - - err = mc.readUntilEOF() - } - - return err -} - -func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.InterpolateParams { - return nil, driver.ErrSkip - } - // try client-side prepare to reduce roundtrip - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen == 0 { - // no columns, no more data - return emptyRows{}, nil - } - // Columns - rows.columns, err = 
mc.readColumns(resLen) - return rows, err - } - } - return nil, err -} - -// Gets the value of the given MySQL System Variable -// The returned byte slice is only valid until the next read -func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { - // Send command - if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { - return nil, err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}} - - if resLen > 0 { - // Columns - if err := mc.readUntilEOF(); err != nil { - return nil, err - } - } - - dest := make([]driver.Value, resLen) - if err = rows.readRow(dest); err == nil { - return dest[0].([]byte), mc.readUntilEOF() - } - } - return nil, err -} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go deleted file mode 100755 index 88cfff3..0000000 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ /dev/null @@ -1,163 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const ( - minProtocolVersion byte = 10 - maxPacketSize = 1<<24 - 1 - timeFormat = "2006-01-02 15:04:05.999999" -) - -// MySQL constants documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -const ( - iOK byte = 0x00 - iLocalInFile byte = 0xfb - iEOF byte = 0xfe - iERR byte = 0xff -) - -// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags -type clientFlag uint32 - -const ( - clientLongPassword clientFlag = 1 << iota - clientFoundRows - clientLongFlag - clientConnectWithDB - clientNoSchema - clientCompress - clientODBC - clientLocalFiles - clientIgnoreSpace - clientProtocol41 - clientInteractive - clientSSL - clientIgnoreSIGPIPE - clientTransactions - clientReserved - clientSecureConn - clientMultiStatements - clientMultiResults - clientPSMultiResults - clientPluginAuth - clientConnectAttrs - clientPluginAuthLenEncClientData - clientCanHandleExpiredPasswords - clientSessionTrack - clientDeprecateEOF -) - -const ( - comQuit byte = iota + 1 - comInitDB - comQuery - comFieldList - comCreateDB - comDropDB - comRefresh - comShutdown - comStatistics - comProcessInfo - comConnect - comProcessKill - comDebug - comPing - comTime - comDelayedInsert - comChangeUser - comBinlogDump - comTableDump - comConnectOut - comRegisterSlave - comStmtPrepare - comStmtExecute - comStmtSendLongData - comStmtClose - comStmtReset - comSetOption - comStmtFetch -) - -// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType -const ( - fieldTypeDecimal byte = iota - fieldTypeTiny - fieldTypeShort - fieldTypeLong - fieldTypeFloat - fieldTypeDouble - fieldTypeNULL - fieldTypeTimestamp - fieldTypeLongLong - fieldTypeInt24 - fieldTypeDate - fieldTypeTime - fieldTypeDateTime - fieldTypeYear - fieldTypeNewDate - fieldTypeVarChar - fieldTypeBit -) -const ( - fieldTypeJSON byte = iota + 0xf5 - fieldTypeNewDecimal - fieldTypeEnum - fieldTypeSet - fieldTypeTinyBLOB - fieldTypeMediumBLOB - fieldTypeLongBLOB - fieldTypeBLOB - fieldTypeVarString - fieldTypeString - fieldTypeGeometry -) - -type fieldFlag uint16 - 
-const ( - flagNotNULL fieldFlag = 1 << iota - flagPriKey - flagUniqueKey - flagMultipleKey - flagBLOB - flagUnsigned - flagZeroFill - flagBinary - flagEnum - flagAutoIncrement - flagTimestamp - flagSet - flagUnknown1 - flagUnknown2 - flagUnknown3 - flagUnknown4 -) - -// http://dev.mysql.com/doc/internals/en/status-flags.html -type statusFlag uint16 - -const ( - statusInTrans statusFlag = 1 << iota - statusInAutocommit - statusReserved // Not in documentation - statusMoreResultsExists - statusNoGoodIndexUsed - statusNoIndexUsed - statusCursorExists - statusLastRowSent - statusDbDropped - statusNoBackslashEscapes - statusMetadataChanged - statusQueryWasSlow - statusPsOutParams - statusInTransReadonly - statusSessionStateChanged -) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go deleted file mode 100755 index 6854929..0000000 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// Package mysql provides a MySQL DriverStatus for Go's database/sql package -// -// The DriverStatus should be used via the database/sql package: -// -// import "database/sql" -// import _ "github.com/go-sql-DriverStatus/mysql" -// -// db, err := sql.Open("mysql", "user:password@/dbname") -// -// See https://github.com/go-sql-driver/mysql#usage for details -package mysql - -import ( - "database/sql" - "database/sql/driver" - "net" -) - -// MySQLDriver is exported to make the DriverStatus directly accessible. -// In general the DriverStatus is used via the database/sql package. -type MySQLDriver struct{} - -// DialFunc is a function which can be used to establish the network connection. -// Custom dial functions must be registered with RegisterDial -type DialFunc func(addr string) (net.Conn, error) - -var dials map[string]DialFunc - -// RegisterDial registers a custom dial function. It can then be used by the -// network address mynet(addr), where mynet is the registered new network. -// addr is passed as a parameter to the dial function. -func RegisterDial(net string, dial DialFunc) { - if dials == nil { - dials = make(map[string]DialFunc) - } - dials[net] = dial -} - -// Open new Connection. -// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formated -func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { - var err error - - // New mysqlConn - mc := &mysqlConn{ - maxPacketAllowed: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - } - mc.cfg, err = ParseDSN(dsn) - if err != nil { - return nil, err - } - mc.parseTime = mc.cfg.ParseTime - mc.strict = mc.cfg.Strict - - // Connect to Server - if dial, ok := dials[mc.cfg.Net]; ok { - mc.netConn, err = dial(mc.cfg.Addr) - } else { - nd := net.Dialer{Timeout: mc.cfg.Timeout} - mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) - } - if err != nil { - return nil, err - } - - // Enable TCP Keepalives on TCP connections - if tc, ok := mc.netConn.(*net.TCPConn); ok { - if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. 
- mc.netConn.Close() - mc.netConn = nil - return nil, err - } - } - - mc.buf = newBuffer(mc.netConn) - - // Set I/O timeouts - mc.buf.timeout = mc.cfg.ReadTimeout - mc.writeTimeout = mc.cfg.WriteTimeout - - // Reading Handshake Initialization Packet - cipher, err := mc.readInitPacket() - if err != nil { - mc.cleanup() - return nil, err - } - - // Send Client Authentication Packet - if err = mc.writeAuthPacket(cipher); err != nil { - mc.cleanup() - return nil, err - } - - // Handle response to auth packet, switch methods if possible - if err = handleAuthResult(mc, cipher); err != nil { - // Authentication failed and MySQL has already closed the connection - // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). - // Do not send COM_QUIT, just cleanup and return the error. - mc.cleanup() - return nil, err - } - - // Get max allowed packet size - maxap, err := mc.getSystemVar("max_allowed_packet") - if err != nil { - mc.Close() - return nil, err - } - mc.maxPacketAllowed = stringToInt(maxap) - 1 - if mc.maxPacketAllowed < maxPacketSize { - mc.maxWriteSize = mc.maxPacketAllowed - } - - // Handle DSN Params - err = mc.handleParams() - if err != nil { - mc.Close() - return nil, err - } - - return mc, nil -} - -func handleAuthResult(mc *mysqlConn, cipher []byte) error { - // Read Result Packet - err := mc.readResultOK() - if err == nil { - return nil // auth successful - } - - if mc.cfg == nil { - return err // auth failed and retry not possible - } - - // Retry auth if configured to do so. - if mc.cfg.AllowOldPasswords && err == ErrOldPassword { - // Retry with old authentication method. Note: there are edge cases - // where this should work but doesn't; this is currently "wontfix": - // https://github.com/go-sql-driver/mysql/issues/184 - if err = mc.writeOldAuthPacket(cipher); err != nil { - return err - } - err = mc.readResultOK() - } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword { - // Retry with clear text password for - // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html - // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html - if err = mc.writeClearAuthPacket(); err != nil { - return err - } - err = mc.readResultOK() - } - return err -} - -func init() { - sql.Register("mysql", &MySQLDriver{}) -} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go deleted file mode 100755 index 73138bc..0000000 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ /dev/null @@ -1,513 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
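
Note on usage after this change: the driver.go deleted above is only ever consumed through database/sql, and its own doc comment shows the blank-import pattern. A minimal sketch of that pattern, assuming the import is now satisfied by go.mod instead of the removed vendor/ tree; the connection values are placeholders, not taken from this repository:

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/go-sql-driver/mysql" // registers the "mysql" driver via init()
    )

    func main() {
        // parseTime=true is one of the DSN parameters handled by the deleted dsn.go.
        db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // sql.Open does not dial; Ping forces a real connection (MySQLDriver.Open).
        if err := db.Ping(); err != nil {
            log.Fatal(err)
        }
    }
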
- -package mysql - -import ( - "bytes" - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "strings" - "time" -) - -var ( - errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") - errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") - errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") - errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") -) - -// Config is a configuration parsed from a DSN string -type Config struct { - User string // Username - Passwd string // Password (requires User) - Net string // Network type - Addr string // Network address (requires Net) - DBName string // Database name - Params map[string]string // Connection parameters - Collation string // Connection collation - Loc *time.Location // Location for time.Time values - TLSConfig string // TLS configuration name - tls *tls.Config // TLS configuration - Timeout time.Duration // Dial timeout - ReadTimeout time.Duration // I/O read timeout - WriteTimeout time.Duration // I/O write timeout - - AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE - AllowCleartextPasswords bool // Allows the cleartext client side plugin - AllowOldPasswords bool // Allows the old insecure password method - ClientFoundRows bool // Return number of matching rows instead of rows changed - ColumnsWithAlias bool // Prepend table alias to column names - InterpolateParams bool // Interpolate placeholders into query string - MultiStatements bool // Allow multiple statements in one query - ParseTime bool // Parse time values to time.Time - Strict bool // Return warnings as errors -} - -// FormatDSN formats the given Config into a DSN string which can be passed to -// the driver. 
-func (cfg *Config) FormatDSN() string { - var buf bytes.Buffer - - // [username[:password]@] - if len(cfg.User) > 0 { - buf.WriteString(cfg.User) - if len(cfg.Passwd) > 0 { - buf.WriteByte(':') - buf.WriteString(cfg.Passwd) - } - buf.WriteByte('@') - } - - // [protocol[(address)]] - if len(cfg.Net) > 0 { - buf.WriteString(cfg.Net) - if len(cfg.Addr) > 0 { - buf.WriteByte('(') - buf.WriteString(cfg.Addr) - buf.WriteByte(')') - } - } - - // /dbname - buf.WriteByte('/') - buf.WriteString(cfg.DBName) - - // [?param1=value1&...¶mN=valueN] - hasParam := false - - if cfg.AllowAllFiles { - hasParam = true - buf.WriteString("?allowAllFiles=true") - } - - if cfg.AllowCleartextPasswords { - if hasParam { - buf.WriteString("&allowCleartextPasswords=true") - } else { - hasParam = true - buf.WriteString("?allowCleartextPasswords=true") - } - } - - if cfg.AllowOldPasswords { - if hasParam { - buf.WriteString("&allowOldPasswords=true") - } else { - hasParam = true - buf.WriteString("?allowOldPasswords=true") - } - } - - if cfg.ClientFoundRows { - if hasParam { - buf.WriteString("&clientFoundRows=true") - } else { - hasParam = true - buf.WriteString("?clientFoundRows=true") - } - } - - if col := cfg.Collation; col != defaultCollation && len(col) > 0 { - if hasParam { - buf.WriteString("&collation=") - } else { - hasParam = true - buf.WriteString("?collation=") - } - buf.WriteString(col) - } - - if cfg.ColumnsWithAlias { - if hasParam { - buf.WriteString("&columnsWithAlias=true") - } else { - hasParam = true - buf.WriteString("?columnsWithAlias=true") - } - } - - if cfg.InterpolateParams { - if hasParam { - buf.WriteString("&interpolateParams=true") - } else { - hasParam = true - buf.WriteString("?interpolateParams=true") - } - } - - if cfg.Loc != time.UTC && cfg.Loc != nil { - if hasParam { - buf.WriteString("&loc=") - } else { - hasParam = true - buf.WriteString("?loc=") - } - buf.WriteString(url.QueryEscape(cfg.Loc.String())) - } - - if cfg.MultiStatements { - if hasParam { - buf.WriteString("&multiStatements=true") - } else { - hasParam = true - buf.WriteString("?multiStatements=true") - } - } - - if cfg.ParseTime { - if hasParam { - buf.WriteString("&parseTime=true") - } else { - hasParam = true - buf.WriteString("?parseTime=true") - } - } - - if cfg.ReadTimeout > 0 { - if hasParam { - buf.WriteString("&readTimeout=") - } else { - hasParam = true - buf.WriteString("?readTimeout=") - } - buf.WriteString(cfg.ReadTimeout.String()) - } - - if cfg.Strict { - if hasParam { - buf.WriteString("&strict=true") - } else { - hasParam = true - buf.WriteString("?strict=true") - } - } - - if cfg.Timeout > 0 { - if hasParam { - buf.WriteString("&timeout=") - } else { - hasParam = true - buf.WriteString("?timeout=") - } - buf.WriteString(cfg.Timeout.String()) - } - - if len(cfg.TLSConfig) > 0 { - if hasParam { - buf.WriteString("&tls=") - } else { - hasParam = true - buf.WriteString("?tls=") - } - buf.WriteString(url.QueryEscape(cfg.TLSConfig)) - } - - if cfg.WriteTimeout > 0 { - if hasParam { - buf.WriteString("&writeTimeout=") - } else { - hasParam = true - buf.WriteString("?writeTimeout=") - } - buf.WriteString(cfg.WriteTimeout.String()) - } - - // other params - if cfg.Params != nil { - for param, value := range cfg.Params { - if hasParam { - buf.WriteByte('&') - } else { - hasParam = true - buf.WriteByte('?') - } - - buf.WriteString(param) - buf.WriteByte('=') - buf.WriteString(url.QueryEscape(value)) - } - } - - return buf.String() -} - -// ParseDSN parses the DSN string to a Config -func ParseDSN(dsn string) 
(cfg *Config, err error) { - // New config with some default values - cfg = &Config{ - Loc: time.UTC, - Collation: defaultCollation, - } - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - foundSlash := false - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - foundSlash = true - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; k++ { - if dsn[k] == ':' { - cfg.Passwd = dsn[k+1 : j] - break - } - } - cfg.User = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return nil, errInvalidDSNUnescaped - } - return nil, errInvalidDSNAddr - } - cfg.Addr = dsn[k+1 : i-1] - break - } - } - cfg.Net = dsn[j+1 : k] - } - - // dbname[?param1=value1&...¶mN=valueN] - // Find the first '?' in dsn[i+1:] - for j = i + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - cfg.DBName = dsn[i+1 : j] - - break - } - } - - if !foundSlash && len(dsn) > 0 { - return nil, errInvalidDSNNoSlash - } - - if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { - return nil, errInvalidDSNUnsafeCollation - } - - // Set default network if empty - if cfg.Net == "" { - cfg.Net = "tcp" - } - - // Set default address if empty - if cfg.Addr == "" { - switch cfg.Net { - case "tcp": - cfg.Addr = "127.0.0.1:3306" - case "unix": - cfg.Addr = "/tmp/mysql.sock" - default: - return nil, errors.New("default addr for network '" + cfg.Net + "' unknown") - } - - } - - return -} - -// parseDSNParams parses the DSN "query string" -// Values must be url.QueryEscape'ed -func parseDSNParams(cfg *Config, params string) (err error) { - for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { - continue - } - - // cfg params - switch value := param[1]; param[0] { - - // Disable INFILE whitelist / enable all files - case "allowAllFiles": - var isBool bool - cfg.AllowAllFiles, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Use cleartext authentication mode (MySQL 5.5.10+) - case "allowCleartextPasswords": - var isBool bool - cfg.AllowCleartextPasswords, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Use old authentication mode (pre MySQL 4.1) - case "allowOldPasswords": - var isBool bool - cfg.AllowOldPasswords, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Switch "rowsAffected" mode - case "clientFoundRows": - var isBool bool - cfg.ClientFoundRows, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Collation - case "collation": - cfg.Collation = value - break - - case "columnsWithAlias": - var isBool bool - cfg.ColumnsWithAlias, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Compression - case "compress": - return errors.New("compression not implemented yet") - - // Enable client side placeholder 
substitution - case "interpolateParams": - var isBool bool - cfg.InterpolateParams, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Time Location - case "loc": - if value, err = url.QueryUnescape(value); err != nil { - return - } - cfg.Loc, err = time.LoadLocation(value) - if err != nil { - return - } - - // multiple statements in one query - case "multiStatements": - var isBool bool - cfg.MultiStatements, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // time.Time parsing - case "parseTime": - var isBool bool - cfg.ParseTime, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // I/O read Timeout - case "readTimeout": - cfg.ReadTimeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // Strict mode - case "strict": - var isBool bool - cfg.Strict, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Dial Timeout - case "timeout": - cfg.Timeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // TLS-Encryption - case "tls": - boolValue, isBool := readBool(value) - if isBool { - if boolValue { - cfg.TLSConfig = "true" - cfg.tls = &tls.Config{} - } else { - cfg.TLSConfig = "false" - } - } else if vl := strings.ToLower(value); vl == "skip-verify" { - cfg.TLSConfig = vl - cfg.tls = &tls.Config{InsecureSkipVerify: true} - } else { - name, err := url.QueryUnescape(value) - if err != nil { - return fmt.Errorf("invalid value for TLS config name: %v", err) - } - - if tlsConfig, ok := tlsConfigRegister[name]; ok { - if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { - host, _, err := net.SplitHostPort(cfg.Addr) - if err == nil { - tlsConfig.ServerName = host - } - } - - cfg.TLSConfig = name - cfg.tls = tlsConfig - } else { - return errors.New("invalid value / unknown config name: " + name) - } - } - - // I/O write Timeout - case "writeTimeout": - cfg.WriteTimeout, err = time.ParseDuration(value) - if err != nil { - return - } - - default: - // lazy init - if cfg.Params == nil { - cfg.Params = make(map[string]string) - } - - if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { - return - } - } - } - - return -} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go deleted file mode 100755 index 9803c58..0000000 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ /dev/null @@ -1,131 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "database/sql/driver" - "errors" - "fmt" - "io" - "log" - "os" -) - -// Various errors the DriverStatus might return. Can change between DriverStatus versions. -var ( - ErrInvalidConn = errors.New("invalid connection") - ErrMalformPkt = errors.New("malformed packet") - ErrNoTLS = errors.New("TLS requested but server does not support TLS") - ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. 
See also https://github.com/go-sql-DriverStatus/mysql/wiki/old_passwords") - ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") - ErrUnknownPlugin = errors.New("this authentication plugin is not supported") - ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") - ErrPktSync = errors.New("commands out of sync. You can't run this command now") - ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") - ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") - ErrBusyBuffer = errors.New("busy buffer") -) - -var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) - -// Logger is used to log critical error messages. -type Logger interface { - Print(v ...interface{}) -} - -// SetLogger is used to set the logger for critical errors. -// The initial logger is os.Stderr. -func SetLogger(logger Logger) error { - if logger == nil { - return errors.New("logger is nil") - } - errLog = logger - return nil -} - -// MySQLError is an error type which represents a single MySQL error -type MySQLError struct { - Number uint16 - Message string -} - -func (me *MySQLError) Error() string { - return fmt.Sprintf("Error %d: %s", me.Number, me.Message) -} - -// MySQLWarnings is an error type which represents a group of one or more MySQL -// warnings -type MySQLWarnings []MySQLWarning - -func (mws MySQLWarnings) Error() string { - var msg string - for i, warning := range mws { - if i > 0 { - msg += "\r\n" - } - msg += fmt.Sprintf( - "%s %s: %s", - warning.Level, - warning.Code, - warning.Message, - ) - } - return msg -} - -// MySQLWarning is an error type which represents a single MySQL warning. -// Warnings are returned in groups only. See MySQLWarnings -type MySQLWarning struct { - Level string - Code string - Message string -} - -func (mc *mysqlConn) getWarnings() (err error) { - rows, err := mc.Query("SHOW WARNINGS", nil) - if err != nil { - return - } - - var warnings = MySQLWarnings{} - var values = make([]driver.Value, 3) - - for { - err = rows.Next(values) - switch err { - case nil: - warning := MySQLWarning{} - - if raw, ok := values[0].([]byte); ok { - warning.Level = string(raw) - } else { - warning.Level = fmt.Sprintf("%s", values[0]) - } - if raw, ok := values[1].([]byte); ok { - warning.Code = string(raw) - } else { - warning.Code = fmt.Sprintf("%s", values[1]) - } - if raw, ok := values[2].([]byte); ok { - warning.Message = string(raw) - } else { - warning.Message = fmt.Sprintf("%s", values[0]) - } - - warnings = append(warnings, warning) - - case io.EOF: - return warnings - - default: - rows.Close() - return - } - } -} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go deleted file mode 100755 index 0f975bb..0000000 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
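
For context on the dsn.go removed above: ParseDSN rejects interpolateParams combined with any collation from the unsafeCollations blacklist, because those multibyte encodings can carry 0x5c in trailing bytes and would defeat backslash escaping. A small sketch of that check, again assuming the module is fetched via go.mod as this patch intends; the DSN values are placeholders:

    package main

    import (
        "fmt"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        // gbk_chinese_ci is listed in unsafeCollations, so ParseDSN refuses the combination.
        dsn := "user:pass@tcp(127.0.0.1:3306)/dbname?interpolateParams=true&collation=gbk_chinese_ci"
        if _, err := mysql.ParseDSN(dsn); err != nil {
            fmt.Println(err) // invalid DSN: interpolateParams can not be used with unsafe collations
        }
    }
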
- -package mysql - -import ( - "fmt" - "io" - "os" - "strings" - "sync" -) - -var ( - fileRegister map[string]bool - fileRegisterLock sync.RWMutex - readerRegister map[string]func() io.Reader - readerRegisterLock sync.RWMutex -) - -// RegisterLocalFile adds the given file to the file whitelist, -// so that it can be used by "LOAD DATA LOCAL INFILE ". -// Alternatively you can allow the use of all local files with -// the DSN parameter 'allowAllFiles=true' -// -// filePath := "/home/gopher/data.csv" -// mysql.RegisterLocalFile(filePath) -// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterLocalFile(filePath string) { - fileRegisterLock.Lock() - // lazy map init - if fileRegister == nil { - fileRegister = make(map[string]bool) - } - - fileRegister[strings.Trim(filePath, `"`)] = true - fileRegisterLock.Unlock() -} - -// DeregisterLocalFile removes the given filepath from the whitelist. -func DeregisterLocalFile(filePath string) { - fileRegisterLock.Lock() - delete(fileRegister, strings.Trim(filePath, `"`)) - fileRegisterLock.Unlock() -} - -// RegisterReaderHandler registers a handler function which is used -// to receive a io.Reader. -// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". -// If the handler returns a io.ReadCloser Close() is called when the -// request is finished. -// -// mysql.RegisterReaderHandler("data", func() io.Reader { -// var csvReader io.Reader // Some Reader that returns CSV data -// ... // Open Reader here -// return csvReader -// }) -// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterReaderHandler(name string, handler func() io.Reader) { - readerRegisterLock.Lock() - // lazy map init - if readerRegister == nil { - readerRegister = make(map[string]func() io.Reader) - } - - readerRegister[name] = handler - readerRegisterLock.Unlock() -} - -// DeregisterReaderHandler removes the ReaderHandler function with -// the given name from the registry. -func DeregisterReaderHandler(name string) { - readerRegisterLock.Lock() - delete(readerRegister, name) - readerRegisterLock.Unlock() -} - -func deferredClose(err *error, closer io.Closer) { - closeErr := closer.Close() - if *err == nil { - *err = closeErr - } -} - -func (mc *mysqlConn) handleInFileRequest(name string) (err error) { - var rdr io.Reader - var data []byte - packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP - if mc.maxWriteSize < packetSize { - packetSize = mc.maxWriteSize - } - - if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader - // The server might return an an absolute path. See issue #355. 
- name = name[idx+8:] - - readerRegisterLock.RLock() - handler, inMap := readerRegister[name] - readerRegisterLock.RUnlock() - - if inMap { - rdr = handler() - if rdr != nil { - if cl, ok := rdr.(io.Closer); ok { - defer deferredClose(&err, cl) - } - } else { - err = fmt.Errorf("Reader '%s' is ", name) - } - } else { - err = fmt.Errorf("Reader '%s' is not registered", name) - } - } else { // File - name = strings.Trim(name, `"`) - fileRegisterLock.RLock() - fr := fileRegister[name] - fileRegisterLock.RUnlock() - if mc.cfg.AllowAllFiles || fr { - var file *os.File - var fi os.FileInfo - - if file, err = os.Open(name); err == nil { - defer deferredClose(&err, file) - - // get file size - if fi, err = file.Stat(); err == nil { - rdr = file - if fileSize := int(fi.Size()); fileSize < packetSize { - packetSize = fileSize - } - } - } - } else { - err = fmt.Errorf("local file '%s' is not registered", name) - } - } - - // send content packets - if err == nil { - data := make([]byte, 4+packetSize) - var n int - for err == nil { - n, err = rdr.Read(data[4:]) - if n > 0 { - if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { - return ioErr - } - } - } - if err == io.EOF { - err = nil - } - } - - // send empty packet (termination) - if data == nil { - data = make([]byte, 4) - } - if ioErr := mc.writePacket(data[:4]); ioErr != nil { - return ioErr - } - - // read OK packet - if err == nil { - return mc.readResultOK() - } - - mc.readPacket() - return err -} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go deleted file mode 100755 index 6025399..0000000 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ /dev/null @@ -1,1246 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "time" -) - -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -// Read packet to buffer 'data' -func (mc *mysqlConn) readPacket() ([]byte, error) { - var payload []byte - for { - // Read packet header - data, err := mc.buf.readNext(4) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - // Packet Length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) - - if pktLen < 1 { - errLog.Print(ErrMalformPkt) - mc.Close() - return nil, driver.ErrBadConn - } - - // Check Packet Sync [8 bit] - if data[3] != mc.sequence { - if data[3] > mc.sequence { - return nil, ErrPktSyncMul - } - return nil, ErrPktSync - } - mc.sequence++ - - // Read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - isLastPacket := (pktLen < maxPacketSize) - - // Zero allocations for non-splitting packets - if isLastPacket && payload == nil { - return data, nil - } - - payload = append(payload, data...) 
- - if isLastPacket { - return payload, nil - } - } -} - -// Write packet buffer 'data' -func (mc *mysqlConn) writePacket(data []byte) error { - pktLen := len(data) - 4 - - if pktLen > mc.maxPacketAllowed { - return ErrPktTooLarge - } - - for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } - data[3] = mc.sequence - - // Write packet - if mc.writeTimeout > 0 { - if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { - return err - } - } - - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) - errLog.Print(ErrMalformPkt) - } else { - errLog.Print(err) - } - return driver.ErrBadConn - } -} - -/****************************************************************************** -* Initialisation Process * -******************************************************************************/ - -// Handshake Initialization Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake -func (mc *mysqlConn) readInitPacket() ([]byte, error) { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - if data[0] == iERR { - return nil, mc.handleErrorPacket(data) - } - - // protocol version [1 byte] - if data[0] < minProtocolVersion { - return nil, fmt.Errorf( - "unsupported protocol version %d. Version %d or higher is required", - data[0], - minProtocolVersion, - ) - } - - // server version [null terminated string] - // connection id [4 bytes] - pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 - - // first part of the password cipher [8 bytes] - cipher := data[pos : pos+8] - - // (filler) always 0x00 [1 byte] - pos += 8 + 1 - - // capability flags (lower 2 bytes) [2 bytes] - mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - if mc.flags&clientProtocol41 == 0 { - return nil, ErrOldProtocol - } - if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - return nil, ErrNoTLS - } - pos += 2 - - if len(data) > pos { - // character set [1 byte] - // status flags [2 bytes] - // capability flags (upper 2 bytes) [2 bytes] - // length of auth-plugin-data [1 byte] - // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 - - // second part of the password cipher [mininum 13 bytes], - // where len=MAX(13, length of auth-plugin-data - 8) - // - // The web documentation is ambiguous about the length. However, - // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, - // the 13th byte is "\0 byte, terminating the second part of - // a scramble". So the second part of the password cipher is - // a NULL terminated string that's at least 13 bytes with the - // last byte being NULL. - // - // The official Python library uses the fixed length 12 - // which seems to work but technically could have a hidden bug. - cipher = append(cipher, data[pos:pos+12]...) 
- - // TODO: Verify string termination - // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) - // \NUL otherwise - // - //if data[len(data)-1] == 0 { - // return - //} - //return ErrMalformPkt - - // make a memory safe copy of the cipher slice - var b [20]byte - copy(b[:], cipher) - return b[:], nil - } - - // make a memory safe copy of the cipher slice - var b [8]byte - copy(b[:], cipher) - return b[:], nil -} - -// Client Authentication Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse -func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { - // Adjust client flags based on server support - clientFlags := clientProtocol41 | - clientSecureConn | - clientLongPassword | - clientTransactions | - clientLocalFiles | - clientPluginAuth | - clientMultiResults | - mc.flags&clientLongFlag - - if mc.cfg.ClientFoundRows { - clientFlags |= clientFoundRows - } - - // To enable TLS / SSL - if mc.cfg.tls != nil { - clientFlags |= clientSSL - } - - if mc.cfg.MultiStatements { - clientFlags |= clientMultiStatements - } - - // User Password - scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) - - pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1 - - // To specify a db name - if n := len(mc.cfg.DBName); n > 0 { - clientFlags |= clientConnectWithDB - pktLen += n + 1 - } - - // Calculate packet length and get buffer with that size - data := mc.buf.takeSmallBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) - - // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 - - // Charset [1 byte] - var found bool - data[12], found = collations[mc.cfg.Collation] - if !found { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. 
- return errors.New("unknown collation") - } - - // SSL Connection Request Packet - // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest - if mc.cfg.tls != nil { - // Send TLS / SSL request packet - if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { - return err - } - - // Switch to TLS - tlsConn := tls.Client(mc.netConn, mc.cfg.tls) - if err := tlsConn.Handshake(); err != nil { - return err - } - mc.netConn = tlsConn - mc.buf.nc = tlsConn - } - - // Filler [23 bytes] (all 0x00) - pos := 13 - for ; pos < 13+23; pos++ { - data[pos] = 0 - } - - // User [null terminated string] - if len(mc.cfg.User) > 0 { - pos += copy(data[pos:], mc.cfg.User) - } - data[pos] = 0x00 - pos++ - - // ScrambleBuffer [length encoded integer] - data[pos] = byte(len(scrambleBuff)) - pos += 1 + copy(data[pos+1:], scrambleBuff) - - // Databasename [null terminated string] - if len(mc.cfg.DBName) > 0 { - pos += copy(data[pos:], mc.cfg.DBName) - data[pos] = 0x00 - pos++ - } - - // Assume native client during response - pos += copy(data[pos:], "mysql_native_password") - data[pos] = 0x00 - - // Send Auth packet - return mc.writePacket(data) -} - -// Client old authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { - // User password - scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd)) - - // Calculate the packet length and add a tailing 0 - pktLen := len(scrambleBuff) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the scrambled password [null terminated string] - copy(data[4:], scrambleBuff) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -// Client clear text authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeClearAuthPacket() error { - // Calculate the packet length and add a tailing 0 - pktLen := len(mc.cfg.Passwd) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the clear password [null terminated string] - copy(data[4:], mc.cfg.Passwd) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -/****************************************************************************** -* Command Packets * -******************************************************************************/ - -func (mc *mysqlConn) writeCommandPacket(command byte) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { - // Reset Packet Sequence - mc.sequence = 0 - - pktLen := 1 + len(arg) - data := mc.buf.takeBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg - copy(data[5:], arg) - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1 + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) - - // Send CMD packet - return mc.writePacket(data) -} - -/****************************************************************************** -* Result Packets * -******************************************************************************/ - -// Returns error if Packet is not an 'Result OK'-Packet -func (mc *mysqlConn) readResultOK() error { - data, err := mc.readPacket() - if err == nil { - // packet indicator - switch data[0] { - - case iOK: - return mc.handleOkPacket(data) - - case iEOF: - if len(data) > 1 { - plugin := string(data[1:bytes.IndexByte(data, 0x00)]) - if plugin == "mysql_old_password" { - // using old_passwords - return ErrOldPassword - } else if plugin == "mysql_clear_password" { - // using clear text password - return ErrCleartextPassword - } else { - return ErrUnknownPlugin - } - } else { - return ErrOldPassword - } - - default: // Error otherwise - return mc.handleErrorPacket(data) - } - } - return err -} - -// Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset -func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { - data, err := mc.readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, n := readLengthEncodedInteger(data) - if n-len(data) == 0 { - return int(num), nil - } - - return 0, ErrMalformPkt - } - return 0, err -} - -// Error Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet -func (mc *mysqlConn) handleErrorPacket(data []byte) error { - if data[0] != iERR { - return ErrMalformPkt - } - - // 0xff [1 byte] - - // Error Number [16 bit uint] - errno := binary.LittleEndian.Uint16(data[1:3]) - - pos := 3 - - // SQL State [optional: # + 5bytes string] - if data[3] == 0x23 { - //sqlstate := string(data[4 : 4+5]) - pos = 9 - } - - // Error Message [string] - return &MySQLError{ - Number: errno, - Message: string(data[pos:]), - } -} - -func readStatus(b []byte) statusFlag { - return statusFlag(b[0]) | statusFlag(b[1])<<8 -} - -// Ok Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet -func (mc *mysqlConn) handleOkPacket(data []byte) error { - var n, m int - - // 0x00 [1 byte] - - // Affected rows [Length Coded Binary] - mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) - - // Insert id [Length Coded Binary] - mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) - - // server_status [2 bytes] - mc.status = readStatus(data[1+n+m : 1+n+m+2]) - if err := mc.discardResults(); err != nil { - return err - } - - // warning count [2 bytes] - if !mc.strict { - 
return nil - } - - pos := 1 + n + m + 2 - if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { - return mc.getWarnings() - } - return nil -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 -func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { - columns := make([]mysqlField, count) - - for i := 0; ; i++ { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - // EOF Packet - if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { - if i == count { - return columns, nil - } - return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) - } - - // Catalog - pos, err := skipLengthEncodedString(data) - if err != nil { - return nil, err - } - - // Database [len coded string] - n, err := skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Table [len coded string] - if mc.cfg.ColumnsWithAlias { - tableName, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - columns[i].tableName = string(tableName) - } else { - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - } - - // Original table [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Name [len coded string] - name, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - columns[i].name = string(name) - pos += n - - // Original name [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - - // Filler [uint8] - // Charset [charset, collation uint8] - // Length [uint32] - pos += n + 1 + 2 + 4 - - // Field type [uint8] - columns[i].fieldType = data[pos] - pos++ - - // Flags [uint16] - columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - pos += 2 - - // Decimals [uint8] - columns[i].decimals = data[pos] - //pos++ - - // Default value [len coded binary] - //if pos < len(data) { - // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) - //} - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow -func (rows *textRows) readRow(dest []driver.Value) error { - mc := rows.mc - - data, err := mc.readPacket() - if err != nil { - return err - } - - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - // server_status [2 bytes] - rows.mc.status = readStatus(data[3:]) - if err := rows.mc.discardResults(); err != nil { - return err - } - rows.mc = nil - return io.EOF - } - if data[0] == iERR { - rows.mc = nil - return mc.handleErrorPacket(data) - } - - // RowSet Packet - var n int - var isNull bool - pos := 0 - - for i := range dest { - // Read bytes and convert to string - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - if !mc.parseTime { - continue - } else { - switch rows.columns[i].fieldType { - case fieldTypeTimestamp, fieldTypeDateTime, - fieldTypeDate, fieldTypeNewDate: - dest[i], err = parseDateTime( - string(dest[i].([]byte)), - mc.cfg.Loc, - ) - if err == nil { - continue - } - default: - continue - } - } - - } else { - dest[i] = nil - continue - } - } - return err // err != nil - } - - return nil -} - -// Reads Packets until EOF-Packet or an Error appears. 
Returns count of Packets read -func (mc *mysqlConn) readUntilEOF() error { - for { - data, err := mc.readPacket() - if err != nil { - return err - } - - switch data[0] { - case iERR: - return mc.handleErrorPacket(data) - case iEOF: - if len(data) == 5 { - mc.status = readStatus(data[3:]) - } - return nil - } - } -} - -/****************************************************************************** -* Prepared Statements * -******************************************************************************/ - -// Prepare Result Packets -// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html -func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { - data, err := stmt.mc.readPacket() - if err == nil { - // packet indicator [1 byte] - if data[0] != iOK { - return 0, stmt.mc.handleErrorPacket(data) - } - - // statement id [4 bytes] - stmt.id = binary.LittleEndian.Uint32(data[1:5]) - - // Column count [16 bit uint] - columnCount := binary.LittleEndian.Uint16(data[5:7]) - - // Param count [16 bit uint] - stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) - - // Reserved [8 bit] - - // Warning count [16 bit uint] - if !stmt.mc.strict { - return columnCount, nil - } - - // Check for warnings count > 0, only available in MySQL > 4.1 - if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { - return columnCount, stmt.mc.getWarnings() - } - return columnCount, nil - } - return 0, err -} - -// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html -func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { - maxLen := stmt.mc.maxPacketAllowed - 1 - pktLen := maxLen - - // After the header (bytes 0-3) follows before the data: - // 1 byte command - // 4 bytes stmtID - // 2 bytes paramID - const dataOffset = 1 + 4 + 2 - - // Can not use the write buffer since - // a) the buffer is too small - // b) it is in use - data := make([]byte, 4+1+4+2+len(arg)) - - copy(data[4+dataOffset:], arg) - - for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { - if dataOffset+argLen < maxLen { - pktLen = dataOffset + argLen - } - - stmt.mc.sequence = 0 - // Add command byte [1 byte] - data[4] = comStmtSendLongData - - // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) - - // Send CMD packet - err := stmt.mc.writePacket(data[:4+pktLen]) - if err == nil { - data = data[pktLen-dataOffset:] - continue - } - return err - - } - - // Reset Packet Sequence - stmt.mc.sequence = 0 - return nil -} - -// Execute Prepared Statement -// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html -func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { - if len(args) != stmt.paramCount { - return fmt.Errorf( - "argument count mismatch (got: %d; has: %d)", - len(args), - stmt.paramCount, - ) - } - - const minPktLen = 4 + 1 + 4 + 1 + 4 - mc := stmt.mc - - // Reset packet-sequence - mc.sequence = 0 - - var data []byte - - if len(args) == 0 { - data = mc.buf.takeBuffer(minPktLen) - } else { - data = mc.buf.takeCompleteBuffer() - } - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // command [1 byte] - data[4] = comStmtExecute - - // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] - data[9] = 0x00 - - // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 - - if len(args) > 0 { - pos := minPktLen - - var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { - // buffer has to be extended but we don't know by how much so - // we depend on append after all data with known sizes fit. - // We stop at that because we deal with a lot of columns here - // which makes the required allocation size hard to guess. - tmp := make([]byte, pos+maskLen+typesLen) - copy(tmp[:pos], data[:pos]) - data = tmp - nullMask = data[pos : pos+maskLen] - pos += maskLen - } else { - nullMask = data[pos : pos+maskLen] - for i := 0; i < maskLen; i++ { - nullMask[i] = 0 - } - pos += maskLen - } - - // newParameterBoundFlag 1 [1 byte] - data[pos] = 0x01 - pos++ - - // type of each parameter [len(args)*2 bytes] - paramTypes := data[pos:] - pos += len(args) * 2 - - // value of each parameter [n bytes] - paramValues := data[pos:pos] - valuesCap := cap(paramValues) - - for i, arg := range args { - // build NULL-bitmap - if arg == nil { - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - continue - } - - // cache types and values - switch v := arg.(type) { - case int64: - paramTypes[i+i] = fieldTypeLongLong - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } - - case float64: - paramTypes[i+i] = fieldTypeDouble - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } - - case bool: - paramTypes[i+i] = fieldTypeTiny - paramTypes[i+i+1] = 0x00 - - if v { - paramValues = append(paramValues, 0x01) - } else { - paramValues = append(paramValues, 0x00) - } - - case []byte: - // Common case (non-nil value) first - if v != nil { - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) - } else { - if err := stmt.writeCommandLongData(i, v); err != nil { - return err - } - } - continue - } - - // Handle []byte(nil) as a NULL value - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - - case string: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) 
- } else { - if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { - return err - } - } - - case time.Time: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - var val []byte - if v.IsZero() { - val = []byte("0000-00-00") - } else { - val = []byte(v.In(mc.cfg.Loc).Format(timeFormat)) - } - - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(val)), - ) - paramValues = append(paramValues, val...) - - default: - return fmt.Errorf("can not convert type: %T", arg) - } - } - - // Check if param values exceeded the available buffer - // In that case we must build the data packet with the new values buffer - if valuesCap != cap(paramValues) { - data = append(data[:pos], paramValues...) - mc.buf.buf = data - } - - pos += len(paramValues) - data = data[:pos] - } - - return mc.writePacket(data) -} - -func (mc *mysqlConn) discardResults() error { - for mc.status&statusMoreResultsExists != 0 { - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return err - } - if resLen > 0 { - // columns - if err := mc.readUntilEOF(); err != nil { - return err - } - // rows - if err := mc.readUntilEOF(); err != nil { - return err - } - } else { - mc.status &^= statusMoreResultsExists - } - } - return nil -} - -// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html -func (rows *binaryRows) readRow(dest []driver.Value) error { - data, err := rows.mc.readPacket() - if err != nil { - return err - } - - // packet indicator [1 byte] - if data[0] != iOK { - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - rows.mc.status = readStatus(data[3:]) - if err := rows.mc.discardResults(); err != nil { - return err - } - rows.mc = nil - return io.EOF - } - rows.mc = nil - - // Error otherwise - return rows.mc.handleErrorPacket(data) - } - - // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] - pos := 1 + (len(dest)+7+2)>>3 - nullMask := data[1:pos] - - for i := range dest { - // Field is NULL - // (byte >> bit-pos) % 2 == 1 - if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { - dest[i] = nil - continue - } - - // Convert to byte-coded string - switch rows.columns[i].fieldType { - case fieldTypeNULL: - dest[i] = nil - continue - - // Numeric Types - case fieldTypeTiny: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(data[pos]) - } else { - dest[i] = int64(int8(data[pos])) - } - pos++ - continue - - case fieldTypeShort, fieldTypeYear: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) - } else { - dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) - } - pos += 2 - continue - - case fieldTypeInt24, fieldTypeLong: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) - } else { - dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) - } - pos += 4 - continue - - case fieldTypeLongLong: - if rows.columns[i].flags&flagUnsigned != 0 { - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if val > math.MaxInt64 { - dest[i] = uint64ToString(val) - } else { - dest[i] = int64(val) - } - } else { - dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) - } - pos += 8 - continue - - case fieldTypeFloat: - dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) - pos += 4 - continue - - case fieldTypeDouble: - dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) - pos += 8 - continue - - // Length coded Binary Strings - case 
fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: - var isNull bool - var n int - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - continue - } else { - dest[i] = nil - continue - } - } - return err - - case - fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD - fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] - fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] - - num, isNull, n := readLengthEncodedInteger(data[pos:]) - pos += n - - switch { - case isNull: - dest[i] = nil - continue - case rows.columns[i].fieldType == fieldTypeTime: - // database/sql does not support an equivalent to TIME, return a string - var dstlen uint8 - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 8 - case 1, 2, 3, 4, 5, 6: - dstlen = 8 + 1 + decimals - default: - return fmt.Errorf( - "protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) - case rows.mc.parseTime: - dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) - default: - var dstlen uint8 - if rows.columns[i].fieldType == fieldTypeDate { - dstlen = 10 - } else { - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 19 - case 1, 2, 3, 4, 5, 6: - dstlen = 19 + 1 + decimals - default: - return fmt.Errorf( - "protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) - } - - if err == nil { - pos += int(num) - continue - } else { - return err - } - - // Please report if this happens! - default: - return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType) - } - } - - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go deleted file mode 100755 index c6438d0..0000000 --- a/vendor/github.com/go-sql-driver/mysql/result.go +++ /dev/null @@ -1,22 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -type mysqlResult struct { - affectedRows int64 - insertId int64 -} - -func (res *mysqlResult) LastInsertId() (int64, error) { - return res.insertId, nil -} - -func (res *mysqlResult) RowsAffected() (int64, error) { - return res.affectedRows, nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go deleted file mode 100755 index c08255e..0000000 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ /dev/null @@ -1,112 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql/driver" - "io" -) - -type mysqlField struct { - tableName string - name string - flags fieldFlag - fieldType byte - decimals byte -} - -type mysqlRows struct { - mc *mysqlConn - columns []mysqlField -} - -type binaryRows struct { - mysqlRows -} - -type textRows struct { - mysqlRows -} - -type emptyRows struct{} - -func (rows *mysqlRows) Columns() []string { - columns := make([]string, len(rows.columns)) - if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { - for i := range columns { - if tableName := rows.columns[i].tableName; len(tableName) > 0 { - columns[i] = tableName + "." + rows.columns[i].name - } else { - columns[i] = rows.columns[i].name - } - } - } else { - for i := range columns { - columns[i] = rows.columns[i].name - } - } - return columns -} - -func (rows *mysqlRows) Close() error { - mc := rows.mc - if mc == nil { - return nil - } - if mc.netConn == nil { - return ErrInvalidConn - } - - // Remove unread packets from stream - err := mc.readUntilEOF() - if err == nil { - if err = mc.discardResults(); err != nil { - return err - } - } - - rows.mc = nil - return err -} - -func (rows *binaryRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows *textRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows emptyRows) Columns() []string { - return nil -} - -func (rows emptyRows) Close() error { - return nil -} - -func (rows emptyRows) Next(dest []driver.Value) error { - return io.EOF -} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go deleted file mode 100755 index ead9a6b..0000000 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ /dev/null @@ -1,150 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql/driver" - "fmt" - "reflect" - "strconv" -) - -type mysqlStmt struct { - mc *mysqlConn - id uint32 - paramCount int - columns []mysqlField // cached from the first query -} - -func (stmt *mysqlStmt) Close() error { - if stmt.mc == nil || stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) - stmt.mc = nil - return err -} - -func (stmt *mysqlStmt) NumInput() int { - return stmt.paramCount -} - -func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { - return converter{} -} - -func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - mc.affectedRows = 0 - mc.insertId = 0 - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - if resLen > 0 { - // Columns - err = mc.readUntilEOF() - if err != nil { - return nil, err - } - - // Rows - err = mc.readUntilEOF() - } - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, nil - } - } - - return nil, err -} - -func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return nil, err - } - - rows := new(binaryRows) - - if resLen > 0 { - rows.mc = mc - // Columns - // If not cached, read them and cache them - if stmt.columns == nil { - rows.columns, err = mc.readColumns(resLen) - stmt.columns = rows.columns - } else { - rows.columns = stmt.columns - err = mc.readUntilEOF() - } - } - - return rows, err -} - -type converter struct{} - -func (c converter) ConvertValue(v interface{}) (driver.Value, error) { - if driver.IsValue(v) { - return v, nil - } - - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Ptr: - // indirect pointers - if rv.IsNil() { - return nil, nil - } - return c.ConvertValue(rv.Elem().Interface()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(rv.Uint()), nil - case reflect.Uint64: - u64 := rv.Uint() - if u64 >= 1<<63 { - return strconv.FormatUint(u64, 10), nil - } - return int64(u64), nil - case reflect.Float32, reflect.Float64: - return rv.Float(), nil - } - return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) -} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go deleted file mode 100755 index 33c749b..0000000 --- a/vendor/github.com/go-sql-driver/mysql/transaction.go +++ /dev/null @@ -1,31 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -type mysqlTx struct { - mc *mysqlConn -} - -func (tx *mysqlTx) Commit() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("COMMIT") - tx.mc = nil - return -} - -func (tx *mysqlTx) Rollback() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("ROLLBACK") - tx.mc = nil - return -} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go deleted file mode 100755 index 5dc47eb..0000000 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ /dev/null @@ -1,740 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "crypto/sha1" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "fmt" - "io" - "strings" - "time" -) - -var ( - tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs -) - -// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. -// Use the key as a value in the DSN where tls=value. -// -// rootCertPool := x509.NewCertPool() -// pem, err := ioutil.ReadFile("/path/ca-cert.pem") -// if err != nil { -// log.Fatal(err) -// } -// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { -// log.Fatal("Failed to append PEM.") -// } -// clientCert := make([]tls.Certificate, 0, 1) -// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") -// if err != nil { -// log.Fatal(err) -// } -// clientCert = append(clientCert, certs) -// mysql.RegisterTLSConfig("custom", &tls.Config{ -// RootCAs: rootCertPool, -// Certificates: clientCert, -// }) -// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") -// -func RegisterTLSConfig(key string, config *tls.Config) error { - if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { - return fmt.Errorf("key '%s' is reserved", key) - } - - if tlsConfigRegister == nil { - tlsConfigRegister = make(map[string]*tls.Config) - } - - tlsConfigRegister[key] = config - return nil -} - -// DeregisterTLSConfig removes the tls.Config associated with key. -func DeregisterTLSConfig(key string) { - if tlsConfigRegister != nil { - delete(tlsConfigRegister, key) - } -} - -// Returns the bool value of the input. 
-// The 2nd return value indicates if the input was a valid bool value -func readBool(input string) (value bool, valid bool) { - switch input { - case "1", "true", "TRUE", "True": - return true, true - case "0", "false", "FALSE", "False": - return false, true - } - - // Not a valid bool value - return -} - -/****************************************************************************** -* Authentication * -******************************************************************************/ - -// Encrypt password using 4.1+ method -func scramblePassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - // stage1Hash = SHA1(password) - crypt := sha1.New() - crypt.Write(password) - stage1 := crypt.Sum(nil) - - // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) - // inner Hash - crypt.Reset() - crypt.Write(stage1) - hash := crypt.Sum(nil) - - // outer Hash - crypt.Reset() - crypt.Write(scramble) - crypt.Write(hash) - scramble = crypt.Sum(nil) - - // token = scrambleHash XOR stage1Hash - for i := range scramble { - scramble[i] ^= stage1[i] - } - return scramble -} - -// Encrypt password using pre 4.1 (old password) method -// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c -type myRnd struct { - seed1, seed2 uint32 -} - -const myRndMaxVal = 0x3FFFFFFF - -// Pseudo random number generator -func newMyRnd(seed1, seed2 uint32) *myRnd { - return &myRnd{ - seed1: seed1 % myRndMaxVal, - seed2: seed2 % myRndMaxVal, - } -} - -// Tested to be equivalent to MariaDB's floating point variant -// http://play.golang.org/p/QHvhd4qved -// http://play.golang.org/p/RG0q4ElWDx -func (r *myRnd) NextByte() byte { - r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal - r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal - - return byte(uint64(r.seed1) * 31 / myRndMaxVal) -} - -// Generate binary hash from byte string using insecure pre 4.1 method -func pwHash(password []byte) (result [2]uint32) { - var add uint32 = 7 - var tmp uint32 - - result[0] = 1345345333 - result[1] = 0x12345671 - - for _, c := range password { - // skip spaces and tabs in password - if c == ' ' || c == '\t' { - continue - } - - tmp = uint32(c) - result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) - result[1] += (result[1] << 8) ^ result[0] - add += tmp - } - - // Remove sign bit (1<<31)-1) - result[0] &= 0x7FFFFFFF - result[1] &= 0x7FFFFFFF - - return -} - -// Encrypt password using insecure pre 4.1 method -func scrambleOldPassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - scramble = scramble[:8] - - hashPw := pwHash(password) - hashSc := pwHash(scramble) - - r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) - - var out [8]byte - for i := range out { - out[i] = r.NextByte() + 64 - } - - mask := r.NextByte() - for i := range out { - out[i] ^= mask - } - - return out[:] -} - -/****************************************************************************** -* Time related utils * -******************************************************************************/ - -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... 
-// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not DriverStatus-specific -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. -// The value type must be time.Time or string / []byte (formatted time-string), -// otherwise Scan fails. -func (nt *NullTime) Scan(value interface{}) (err error) { - if value == nil { - nt.Time, nt.Valid = time.Time{}, false - return - } - - switch v := value.(type) { - case time.Time: - nt.Time, nt.Valid = v, true - return - case []byte: - nt.Time, err = parseDateTime(string(v), time.UTC) - nt.Valid = (err == nil) - return - case string: - nt.Time, err = parseDateTime(v, time.UTC) - nt.Valid = (err == nil) - return - } - - nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) -} - -// Value implements the DriverStatus Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} - -func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { - base := "0000-00-00 00:00:00.0000000" - switch len(str) { - case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" - if str == base[:len(str)] { - return - } - t, err = time.Parse(timeFormat[:len(str)], str) - default: - err = fmt.Errorf("invalid time string: %s", str) - return - } - - // Adjust location - if err == nil && loc != time.UTC { - y, mo, d := t.Date() - h, mi, s := t.Clock() - t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil - } - - return -} - -func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { - switch num { - case 0: - return time.Time{}, nil - case 4: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - 0, 0, 0, 0, - loc, - ), nil - case 7: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - 0, - loc, - ), nil - case 11: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds - loc, - ), nil - } - return nil, fmt.Errorf("invalid DATETIME packet length %d", num) -} - -// zeroDateTime is used in formatBinaryDateTime to avoid an allocation -// if the DATE or DATETIME has the zero value. -// It must never be changed. -// The current behavior depends on database/sql copying the result. 
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000") - -const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" -const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" - -func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { - // length expects the deterministic length of the zero value, - // negative time and 100+ hours are automatically added if needed - if len(src) == 0 { - if justTime { - return zeroDateTime[11 : 11+length], nil - } - return zeroDateTime[:length], nil - } - var dst []byte // return value - var pt, p1, p2, p3 byte // current digit pair - var zOffs byte // offset of value in zeroDateTime - if justTime { - switch length { - case - 8, // time (can be up to 10 when negative and 100+ hours) - 10, 11, 12, 13, 14, 15: // time with fractional seconds - default: - return nil, fmt.Errorf("illegal TIME length %d", length) - } - switch len(src) { - case 8, 12: - default: - return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) - } - // +2 to enable negative time and 100+ hours - dst = make([]byte, 0, length+2) - if src[0] == 1 { - dst = append(dst, '-') - } - if src[1] != 0 { - hour := uint16(src[1])*24 + uint16(src[5]) - pt = byte(hour / 100) - p1 = byte(hour - 100*uint16(pt)) - dst = append(dst, digits01[pt]) - } else { - p1 = src[5] - } - zOffs = 11 - src = src[6:] - } else { - switch length { - case 10, 19, 21, 22, 23, 24, 25, 26: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s length %d", t, length) - } - switch len(src) { - case 4, 7, 11: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) - } - dst = make([]byte, 0, length) - // start with the date - year := binary.LittleEndian.Uint16(src[:2]) - pt = byte(year / 100) - p1 = byte(year - 100*uint16(pt)) - p2, p3 = src[2], src[3] - dst = append(dst, - digits10[pt], digits01[pt], - digits10[p1], digits01[p1], '-', - digits10[p2], digits01[p2], '-', - digits10[p3], digits01[p3], - ) - if length == 10 { - return dst, nil - } - if len(src) == 4 { - return append(dst, zeroDateTime[10:length]...), nil - } - dst = append(dst, ' ') - p1 = src[4] // hour - src = src[5:] - } - // p1 is 2-digit hour, src is after hour - p2, p3 = src[0], src[1] - dst = append(dst, - digits10[p1], digits01[p1], ':', - digits10[p2], digits01[p2], ':', - digits10[p3], digits01[p3], - ) - if length <= byte(len(dst)) { - return dst, nil - } - src = src[2:] - if len(src) == 0 { - return append(dst, zeroDateTime[19:zOffs+length]...), nil - } - microsecs := binary.LittleEndian.Uint32(src[:4]) - p1 = byte(microsecs / 10000) - microsecs -= 10000 * uint32(p1) - p2 = byte(microsecs / 100) - microsecs -= 100 * uint32(p2) - p3 = byte(microsecs) - switch decimals := zOffs + length - 20; decimals { - default: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], digits01[p3], - ), nil - case 1: - return append(dst, '.', - digits10[p1], - ), nil - case 2: - return append(dst, '.', - digits10[p1], digits01[p1], - ), nil - case 3: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], - ), nil - case 4: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - ), nil - case 5: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], 
- ), nil - } -} - -/****************************************************************************** -* Convert from and to bytes * -******************************************************************************/ - -func uint64ToBytes(n uint64) []byte { - return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } -} - -func uint64ToString(n uint64) []byte { - var a [20]byte - i := 20 - - // U+0030 = 0 - // ... - // U+0039 = 9 - - var q uint64 - for n >= 10 { - i-- - q = n / 10 - a[i] = uint8(n-q*10) + 0x30 - n = q - } - - i-- - a[i] = uint8(n) + 0x30 - - return a[i:] -} - -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - -// returns the string read as a bytes slice, wheter the value is NULL, -// the number of bytes read and an error, in case the string is longer than -// the input slice -func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { - // Get length - num, isNull, n := readLengthEncodedInteger(b) - if num < 1 { - return b[n:n], isNull, n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return b[n-int(num) : n], false, n, nil - } - return nil, false, n, io.EOF -} - -// returns the number of bytes skipped and an error, in case the string is -// longer than the input slice -func skipLengthEncodedString(b []byte) (int, error) { - // Get length - num, _, n := readLengthEncodedInteger(b) - if num < 1 { - return n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return n, nil - } - return n, io.EOF -} - -// returns the number read, whether the value is NULL and the number of bytes read -func readLengthEncodedInteger(b []byte) (uint64, bool, int) { - // See issue #349 - if len(b) == 0 { - return 0, true, 1 - } - switch b[0] { - - // 251: NULL - case 0xfb: - return 0, true, 1 - - // 252: value of following 2 - case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 - - // 253: value of following 3 - case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 - - // 254: value of following 8 - case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 - } - - // 0-250: value of first byte - return uint64(b[0]), false, 1 -} - -// encodes a uint64 value and appends it to the given bytes slice -func appendLengthEncodedInteger(b []byte, n uint64) []byte { - switch { - case n <= 250: - return append(b, byte(n)) - - case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) - - case n <= 0xffffff: - return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) - } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) -} - -// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. -// If cap(buf) is not enough, reallocate new buffer. 
-func reserveBuffer(buf []byte, appendSize int) []byte { - newSize := len(buf) + appendSize - if cap(buf) < newSize { - // Grow buffer exponentially - newBuf := make([]byte, len(buf)*2+appendSize) - copy(newBuf, buf) - buf = newBuf - } - return buf[:newSize] -} - -// escapeBytesBackslash escapes []byte with backslashes (\) -// This escapes the contents of a string (provided as []byte) by adding backslashes before special -// characters, and turning others into specific escape sequences, such as -// turning newlines into \n and null bytes into \0. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 -func escapeBytesBackslash(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringBackslash is similar to escapeBytesBackslash but for string. -func escapeStringBackslash(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. -// This escapes the contents of a string by doubling up any apostrophes that -// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in -// effect on the server. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 -func escapeBytesQuotes(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringQuotes is similar to escapeBytesQuotes but for string. 
-func escapeStringQuotes(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} diff --git a/vendor/github.com/huichen/sego/README.md b/vendor/github.com/huichen/sego/README.md deleted file mode 100755 index bd3aa65..0000000 --- a/vendor/github.com/huichen/sego/README.md +++ /dev/null @@ -1,43 +0,0 @@ -sego -==== - -Go中文分词 - -词典用双数组trie(Double-Array Trie)实现, -分词器算法为基于词频的最短路径加动态规划。 - -支持普通和搜索引擎两种分词模式,支持用户词典、词性标注,可运行JSON RPC服务。 - -分词速度单线程9MB/s,goroutines并发42MB/s(8核Macbook Pro)。 - -# 安装/更新 - -``` -go get -u github.com/huichen/sego -``` - -# 使用 - - -```go -package main - -import ( - "fmt" - "github.com/huichen/sego" -) - -func main() { - // 载入词典 - var segmenter sego.Segmenter - segmenter.LoadDictionary("github.com/huichen/sego/data/dictionary.txt") - - // 分词 - text := []byte("中华人民共和国中央人民政府") - segments := segmenter.Segment(text) - - // 处理分词结果 - // 支持普通模式和搜索模式两种分词,见代码中SegmentsToString函数的注释。 - fmt.Println(sego.SegmentsToString(segments, false)) -} -``` diff --git a/vendor/github.com/huichen/sego/dictionary.go b/vendor/github.com/huichen/sego/dictionary.go deleted file mode 100755 index ecab852..0000000 --- a/vendor/github.com/huichen/sego/dictionary.go +++ /dev/null @@ -1,65 +0,0 @@ -package sego - -import "github.com/adamzy/cedar-go" - -// Dictionary结构体实现了一个字串前缀树,一个分词可能出现在叶子节点也有可能出现在非叶节点 -type Dictionary struct { - trie *cedar.Cedar // Cedar 前缀树 - maxTokenLength int // 词典中最长的分词 - tokens []Token // 词典中所有的分词,方便遍历 - totalFrequency int64 // 词典中所有分词的频率之和 -} - -func NewDictionary() *Dictionary { - return &Dictionary{trie: cedar.New()} -} - -// 词典中最长的分词 -func (dict *Dictionary) MaxTokenLength() int { - return dict.maxTokenLength -} - -// 词典中分词数目 -func (dict *Dictionary) NumTokens() int { - return len(dict.tokens) -} - -// 词典中所有分词的频率之和 -func (dict *Dictionary) TotalFrequency() int64 { - return dict.totalFrequency -} - -// 向词典中加入一个分词 -func (dict *Dictionary) addToken(token Token) { - bytes := textSliceToBytes(token.text) - _, err := dict.trie.Get(bytes) - if err == nil { - return - } - - dict.trie.Insert(bytes, dict.NumTokens()) - dict.tokens = append(dict.tokens, token) - dict.totalFrequency += int64(token.frequency) - if len(token.text) > dict.maxTokenLength { - dict.maxTokenLength = len(token.text) - } -} - -// 在词典中查找和字元组words可以前缀匹配的所有分词 -// 返回值为找到的分词数 -func (dict *Dictionary) lookupTokens(words []Text, tokens []*Token) (numOfTokens int) { - var id, value int - var err error - for _, word := range words { - id, err = dict.trie.Jump(word, id) - if err != nil { - break - } - value, err = dict.trie.Value(id) - if err == nil { - tokens[numOfTokens] = &dict.tokens[value] - numOfTokens++ - } - } - return -} diff --git a/vendor/github.com/huichen/sego/license.txt b/vendor/github.com/huichen/sego/license.txt deleted file mode 100755 index 1de9ba1..0000000 --- a/vendor/github.com/huichen/sego/license.txt +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2013 Hui Chen - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/huichen/sego/segment.go b/vendor/github.com/huichen/sego/segment.go deleted file mode 100755 index 4400b72..0000000 --- a/vendor/github.com/huichen/sego/segment.go +++ /dev/null @@ -1,28 +0,0 @@ -package sego - -// 文本中的一个分词 -type Segment struct { - // 分词在文本中的起始字节位置 - start int - - // 分词在文本中的结束字节位置(不包括该位置) - end int - - // 分词信息 - token *Token -} - -// 返回分词在文本中的起始字节位置 -func (s *Segment) Start() int { - return s.start -} - -// 返回分词在文本中的结束字节位置(不包括该位置) -func (s *Segment) End() int { - return s.end -} - -// 返回分词信息 -func (s *Segment) Token() *Token { - return s.token -} diff --git a/vendor/github.com/huichen/sego/segmenter.go b/vendor/github.com/huichen/sego/segmenter.go deleted file mode 100755 index 6fa6cd4..0000000 --- a/vendor/github.com/huichen/sego/segmenter.go +++ /dev/null @@ -1,295 +0,0 @@ -//Go中文分词 -package sego - -import ( - "bufio" - "fmt" - "log" - "math" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -const ( - minTokenFrequency = 2 // 仅从字典文件中读取大于等于此频率的分词 -) - -// 分词器结构体 -type Segmenter struct { - dict *Dictionary -} - -// 该结构体用于记录Viterbi算法中某字元处的向前分词跳转信息 -type jumper struct { - minDistance float32 - token *Token -} - -// 返回分词器使用的词典 -func (seg *Segmenter) Dictionary() *Dictionary { - return seg.dict -} - -// 从文件中载入词典 -// -// 可以载入多个词典文件,文件名用","分隔,排在前面的词典优先载入分词,比如 -// "用户词典.txt,通用词典.txt" -// 当一个分词既出现在用户词典也出现在通用词典中,则优先使用用户词典。 -// -// 词典的格式为(每个分词一行): -// 分词文本 频率 词性 -func (seg *Segmenter) LoadDictionary(files string) { - seg.dict = NewDictionary() - for _, file := range strings.Split(files, ",") { - log.Printf("载入sego词典 %s", file) - dictFile, err := os.Open(file) - defer dictFile.Close() - if err != nil { - log.Fatalf("无法载入字典文件 \"%s\" \n", file) - } - - reader := bufio.NewReader(dictFile) - var text string - var freqText string - var frequency int - var pos string - - // 逐行读入分词 - for { - size, _ := fmt.Fscanln(reader, &text, &freqText, &pos) - - if size == 0 { - // 文件结束 - break - } else if size < 2 { - // 无效行 - continue - } else if size == 2 { - // 没有词性标注时设为空字符串 - pos = "" - } - - // 解析词频 - var err error - frequency, err = strconv.Atoi(freqText) - if err != nil { - continue - } - - // 过滤频率太小的词 - if frequency < minTokenFrequency { - continue - } - - // 将分词添加到字典中 - words := splitTextToWords([]byte(text)) - token := Token{text: words, frequency: frequency, pos: pos} - seg.dict.addToken(token) - } - } - - // 计算每个分词的路径值,路径值含义见Token结构体的注释 - logTotalFrequency := float32(math.Log2(float64(seg.dict.totalFrequency))) - for i := range seg.dict.tokens { - token := &seg.dict.tokens[i] - token.distance = logTotalFrequency - float32(math.Log2(float64(token.frequency))) - } - - // 对每个分词进行细致划分,用于搜索引擎模式,该模式用法见Token结构体的注释。 - for i := range seg.dict.tokens { - token := &seg.dict.tokens[i] - segments := seg.segmentWords(token.text, true) - - // 计算需要添加的子分词数目 - numTokensToAdd := 0 - for iToken := 0; iToken < len(segments); iToken++ { - if len(segments[iToken].token.text) > 1 { - // 略去字元长度为一的分词 - // TODO: 这值得进一步推敲,特别是当字典中有英文复合词的时候 - numTokensToAdd++ - } - } - token.segments = make([]*Segment, numTokensToAdd) - - // 添加子分词 - iSegmentsToAdd := 0 - for iToken := 0; iToken < len(segments); iToken++ { - if len(segments[iToken].token.text) > 1 { - token.segments[iSegmentsToAdd] = &segments[iToken] - iSegmentsToAdd++ - } - } - } - - log.Println("sego词典载入完毕") -} - -// 对文本分词 -// -// 输入参数: -// bytes UTF8文本的字节数组 -// -// 输出: -// []Segment 划分的分词 -func (seg *Segmenter) 
Segment(bytes []byte) []Segment { - return seg.internalSegment(bytes, false) -} - -func (seg *Segmenter) internalSegment(bytes []byte, searchMode bool) []Segment { - // 处理特殊情况 - if len(bytes) == 0 { - return []Segment{} - } - - // 划分字元 - text := splitTextToWords(bytes) - - return seg.segmentWords(text, searchMode) -} - -func (seg *Segmenter) segmentWords(text []Text, searchMode bool) []Segment { - // 搜索模式下该分词已无继续划分可能的情况 - if searchMode && len(text) == 1 { - return []Segment{} - } - - // jumpers定义了每个字元处的向前跳转信息,包括这个跳转对应的分词, - // 以及从文本段开始到该字元的最短路径值 - jumpers := make([]jumper, len(text)) - - tokens := make([]*Token, seg.dict.maxTokenLength) - for current := 0; current < len(text); current++ { - // 找到前一个字元处的最短路径,以便计算后续路径值 - var baseDistance float32 - if current == 0 { - // 当本字元在文本首部时,基础距离应该是零 - baseDistance = 0 - } else { - baseDistance = jumpers[current-1].minDistance - } - - // 寻找所有以当前字元开头的分词 - numTokens := seg.dict.lookupTokens( - text[current:minInt(current+seg.dict.maxTokenLength, len(text))], tokens) - - // 对所有可能的分词,更新分词结束字元处的跳转信息 - for iToken := 0; iToken < numTokens; iToken++ { - location := current + len(tokens[iToken].text) - 1 - if !searchMode || current != 0 || location != len(text)-1 { - updateJumper(&jumpers[location], baseDistance, tokens[iToken]) - } - } - - // 当前字元没有对应分词时补加一个伪分词 - if numTokens == 0 || len(tokens[0].text) > 1 { - updateJumper(&jumpers[current], baseDistance, - &Token{text: []Text{text[current]}, frequency: 1, distance: 32, pos: "x"}) - } - } - - // 从后向前扫描第一遍得到需要添加的分词数目 - numSeg := 0 - for index := len(text) - 1; index >= 0; { - location := index - len(jumpers[index].token.text) + 1 - numSeg++ - index = location - 1 - } - - // 从后向前扫描第二遍添加分词到最终结果 - outputSegments := make([]Segment, numSeg) - for index := len(text) - 1; index >= 0; { - location := index - len(jumpers[index].token.text) + 1 - numSeg-- - outputSegments[numSeg].token = jumpers[index].token - index = location - 1 - } - - // 计算各个分词的字节位置 - bytePosition := 0 - for iSeg := 0; iSeg < len(outputSegments); iSeg++ { - outputSegments[iSeg].start = bytePosition - bytePosition += textSliceByteLength(outputSegments[iSeg].token.text) - outputSegments[iSeg].end = bytePosition - } - return outputSegments -} - -// 更新跳转信息: -// 1. 当该位置从未被访问过时(jumper.minDistance为零的情况),或者 -// 2. 
当该位置的当前最短路径大于新的最短路径时 -// 将当前位置的最短路径值更新为baseDistance加上新分词的概率 -func updateJumper(jumper *jumper, baseDistance float32, token *Token) { - newDistance := baseDistance + token.distance - if jumper.minDistance == 0 || jumper.minDistance > newDistance { - jumper.minDistance = newDistance - jumper.token = token - } -} - -// 取两整数较小值 -func minInt(a, b int) int { - if a > b { - return b - } - return a -} - -// 取两整数较大值 -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -// 将文本划分成字元 -func splitTextToWords(text Text) []Text { - output := make([]Text, 0, len(text)/3) - current := 0 - inAlphanumeric := true - alphanumericStart := 0 - for current < len(text) { - r, size := utf8.DecodeRune(text[current:]) - if size <= 2 && (unicode.IsLetter(r) || unicode.IsNumber(r)) { - // 当前是拉丁字母或数字(非中日韩文字) - if !inAlphanumeric { - alphanumericStart = current - inAlphanumeric = true - } - } else { - if inAlphanumeric { - inAlphanumeric = false - if current != 0 { - output = append(output, toLower(text[alphanumericStart:current])) - } - } - output = append(output, text[current:current+size]) - } - current += size - } - - // 处理最后一个字元是英文的情况 - if inAlphanumeric { - if current != 0 { - output = append(output, toLower(text[alphanumericStart:current])) - } - } - - return output -} - -// 将英文词转化为小写 -func toLower(text []byte) []byte { - output := make([]byte, len(text)) - for i, t := range text { - if t >= 'A' && t <= 'Z' { - output[i] = t - 'A' + 'a' - } else { - output[i] = t - } - } - return output -} diff --git a/vendor/github.com/huichen/sego/test_utils.go b/vendor/github.com/huichen/sego/test_utils.go deleted file mode 100755 index a73ba88..0000000 --- a/vendor/github.com/huichen/sego/test_utils.go +++ /dev/null @@ -1,38 +0,0 @@ -package sego - -import ( - "fmt" - "testing" -) - -func expect(t *testing.T, expect string, actual interface{}) { - actualString := fmt.Sprint(actual) - if expect != actualString { - t.Errorf("期待值=\"%s\", 实际=\"%s\"", expect, actualString) - } -} - -func printTokens(tokens []*Token, numTokens int) (output string) { - for iToken := 0; iToken < numTokens; iToken++ { - for _, word := range tokens[iToken].text { - output += fmt.Sprint(string(word)) - } - output += " " - } - return -} - -func toWords(strings ...string) []Text { - words := []Text{} - for _, s := range strings { - words = append(words, []byte(s)) - } - return words -} - -func bytesToString(bytes []Text) (output string) { - for _, b := range bytes { - output += (string(b) + "/") - } - return -} diff --git a/vendor/github.com/huichen/sego/token.go b/vendor/github.com/huichen/sego/token.go deleted file mode 100755 index 3157bb1..0000000 --- a/vendor/github.com/huichen/sego/token.go +++ /dev/null @@ -1,50 +0,0 @@ -package sego - -// 字串类型,可以用来表达 -// 1. 一个字元,比如"中"又如"国", 英文的一个字元是一个词 -// 2. 一个分词,比如"中国"又如"人口" -// 3. 
一段文字,比如"中国有十三亿人口" -type Text []byte - -// 一个分词 -type Token struct { - // 分词的字串,这实际上是个字元数组 - text []Text - - // 分词在语料库中的词频 - frequency int - - // log2(总词频/该分词词频),这相当于log2(1/p(分词)),用作动态规划中 - // 该分词的路径长度。求解prod(p(分词))的最大值相当于求解 - // sum(distance(分词))的最小值,这就是“最短路径”的来历。 - distance float32 - - // 词性标注 - pos string - - // 该分词文本的进一步分词划分,见Segments函数注释。 - segments []*Segment -} - -// 返回分词文本 -func (token *Token) Text() string { - return textSliceToString(token.text) -} - -// 返回分词在语料库中的词频 -func (token *Token) Frequency() int { - return token.frequency -} - -// 返回分词词性标注 -func (token *Token) Pos() string { - return token.pos -} - -// 该分词文本的进一步分词划分,比如"中华人民共和国中央人民政府"这个分词 -// 有两个子分词"中华人民共和国"和"中央人民政府"。子分词也可以进一步有子分词 -// 形成一个树结构,遍历这个树就可以得到该分词的所有细致分词划分,这主要 -// 用于搜索引擎对一段文本进行全文搜索。 -func (token *Token) Segments() []*Segment { - return token.segments -} diff --git a/vendor/github.com/huichen/sego/utils.go b/vendor/github.com/huichen/sego/utils.go deleted file mode 100755 index f50b079..0000000 --- a/vendor/github.com/huichen/sego/utils.go +++ /dev/null @@ -1,93 +0,0 @@ -package sego - -import ( - "bytes" - "fmt" -) - -// 输出分词结果为字符串 -// -// 有两种输出模式,以"中华人民共和国"为例 -// -// 普通模式(searchMode=false)输出一个分词"中华人民共和国/ns " -// 搜索模式(searchMode=true) 输出普通模式的再细致切分: -// "中华/nz 人民/n 共和/nz 共和国/ns 人民共和国/nt 中华人民共和国/ns " -// -// 搜索模式主要用于给搜索引擎提供尽可能多的关键字,详情请见Token结构体的注释。 -func SegmentsToString(segs []Segment, searchMode bool) (output string) { - if searchMode { - for _, seg := range segs { - output += tokenToString(seg.token) - } - } else { - for _, seg := range segs { - output += fmt.Sprintf( - "%s/%s ", textSliceToString(seg.token.text), seg.token.pos) - } - } - return -} - -func tokenToString(token *Token) (output string) { - for _, s := range token.segments { - output += tokenToString(s.token) - } - output += fmt.Sprintf("%s/%s ", textSliceToString(token.text), token.pos) - return -} - -// 输出分词结果到一个字符串slice -// -// 有两种输出模式,以"中华人民共和国"为例 -// -// 普通模式(searchMode=false)输出一个分词"[中华人民共和国]" -// 搜索模式(searchMode=true) 输出普通模式的再细致切分: -// "[中华 人民 共和 共和国 人民共和国 中华人民共和国]" -// -// 搜索模式主要用于给搜索引擎提供尽可能多的关键字,详情请见Token结构体的注释。 - -func SegmentsToSlice(segs []Segment, searchMode bool) (output []string) { - if searchMode { - for _, seg := range segs { - output = append(output, tokenToSlice(seg.token)...) - } - } else { - for _, seg := range segs { - output = append(output, seg.token.Text()) - } - } - return -} - -func tokenToSlice(token *Token) (output []string) { - for _, s := range token.segments { - output = append(output, tokenToSlice(s.token)...) 
- } - output = append(output, textSliceToString(token.text)) - return output -} - -// 将多个字元拼接一个字符串输出 -func textSliceToString(text []Text) string { - var output string - for _, word := range text { - output += string(word) - } - return output -} - -// 返回多个字元的字节总长度 -func textSliceByteLength(text []Text) (length int) { - for _, word := range text { - length += len(word) - } - return -} - -func textSliceToBytes(text []Text) []byte { - var buf bytes.Buffer - for _, word := range text { - buf.Write(word) - } - return buf.Bytes() -} diff --git a/vendor/github.com/mozillazg/go-cos/CHANGELOG.md b/vendor/github.com/mozillazg/go-cos/CHANGELOG.md deleted file mode 100644 index 7e252fd..0000000 --- a/vendor/github.com/mozillazg/go-cos/CHANGELOG.md +++ /dev/null @@ -1,105 +0,0 @@ -# Changelog - - -## [0.9.0] (2018-08-04) - -### 新增 - -* 新增 `c.Object.PresignedURL` 用于获取预签名授权 URL。 - 可用于无需知道 SecretID 和 SecretKey 就可以上传和下载文件。 -* 上传和下载 Object 的功能支持指定预签名授权 URL。 - -详见 PR 以及使用示例: - -* https://github.com/mozillazg/go-cos/pull/5 -* 通过预签名授权 URL 下载文件,示例:[object/getWithPresignedURL.go](./_example/object/getWithPresignedURL.go) -* 通过预签名授权 URL 上传文件,示例:[object/putWithPresignedURL.go](./_example/object/putWithPresignedURL.go) - - -## [0.8.0] (2018-05-26) - -### 新增 - -* 新增 `func NewBaseURL(bucketURL string) (u *BaseURL, err error)` (via [91f7759]) - -### 变更 - -* `NewBucketURL` 函数使用新的 URL 域名规则。(via [7dcd701]) - 影响:如果有使用 `NewBucketURL` 函数生成 bucketURL 的话,使用时需要使用新的 Region 名称, - 详见 https://cloud.tencent.com/document/product/436/6224 ,未使用 `NewBucketURL` 函数不受影响 - - -## [0.7.0] (2017-12-23) - -### 新增 - -* 支持新增的 Put Object Copy API -* 新增 `github.com/mozillazg/go-cos/debug`,目前只包含 `DebugRequestTransport` - - -## [0.6.0] (2017-07-09) - -### 新增 - -* 增加说明在某些情况下 ObjectPutHeaderOptions.ContentLength 必须要指定 -* 增加 ObjectUploadPartOptions.ContentLength - - -## [0.5.0] (2017-06-28) - -### 修复 - -* 修复 ACL 相关 API 突然失效的问题. 
- (因为 COS ACL 相关 API 的 request 和 response xml body 的结构发生了变化) - -### 删除 - -* 删除调试用的 DebugRequestTransport(把它移动到 examples/ 中) - - -## [0.4.0] (2017-06-24) - -### 新增 - -* 增加 AuthorizationTransport 辅助添加认证信息 - -### 修改 - -* 去掉 API 中的 authTime 参数,默认不再自动添加 Authorization header - 改为通过自定义 client 的方式来添加认证信息 - - -## [0.3.0] (2017-06-23) - -### 新增 - -* 完成剩下的所有 API - - -## [0.2.0] (2017-06-10) - -### 变更 - -* 调用 bucket 相关 API 时不再需要 bucket 参数, 把参数移到 service 中 -* 把参数 signStartTime, signEndTime, keyStartTime, keyEndTime 合并为 authTime - - -## 0.1.0 (2017-06-10) - -### 新增 - -* 完成 Service API -* 完成大部分 Bucket API(还剩一个 Put Bucket Lifecycle) - - -[0.9.0]: https://github.com/mozillazg/go-cos/compare/v0.8.0...v0.9.0 -[0.8.0]: https://github.com/mozillazg/go-cos/compare/v0.7.0...v0.8.0 -[0.7.0]: https://github.com/mozillazg/go-cos/compare/v0.6.0...v0.7.0 -[0.6.0]: https://github.com/mozillazg/go-cos/compare/v0.5.0...v0.6.0 -[0.5.0]: https://github.com/mozillazg/go-cos/compare/v0.4.0...v0.5.0 -[0.4.0]: https://github.com/mozillazg/go-cos/compare/v0.3.0...v0.4.0 -[0.3.0]: https://github.com/mozillazg/go-cos/compare/v0.2.0...v0.3.0 -[0.2.0]: https://github.com/mozillazg/go-cos/compare/v0.1.0...v0.2.0 - -[91f7759]: https://github.com/mozillazg/go-cos/commit/91f7759958f9631e8997f47d30ae4044455fc971 -[7dcd701]: https://github.com/mozillazg/go-cos/commit/7dcd701975f483d57525b292ab31d0f9a6c8866c diff --git a/vendor/github.com/mozillazg/go-cos/LICENSE b/vendor/github.com/mozillazg/go-cos/LICENSE deleted file mode 100644 index 8ff7942..0000000 --- a/vendor/github.com/mozillazg/go-cos/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 mozillazg - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mozillazg/go-cos/Makefile b/vendor/github.com/mozillazg/go-cos/Makefile deleted file mode 100644 index 037750b..0000000 --- a/vendor/github.com/mozillazg/go-cos/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -help: - @echo "test run test" - @echo "lint run lint" - @echo "example run examples" - -.PHONY: test -test: - go test -v -cover -coverprofile cover.out - go tool cover -html=cover.out -o cover.html - -open cover.html - -.PHONY: lint -lint: - gofmt -s -w . - goimports -w . - golint . 
- go vet - -.PHONY: example -example: - cd _example && bash test.sh diff --git a/vendor/github.com/mozillazg/go-cos/README.md b/vendor/github.com/mozillazg/go-cos/README.md deleted file mode 100644 index 5756002..0000000 --- a/vendor/github.com/mozillazg/go-cos/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# go-cos - -腾讯云对象存储服务 COS(Cloud Object Storage) Go SDK(API 版本:V5 版本的 XML API)。 - -[![Build Status](https://img.shields.io/travis/mozillazg/go-cos/master.svg)](https://travis-ci.org/mozillazg/go-cos) -[![Coverage Status](https://img.shields.io/coveralls/mozillazg/go-cos/master.svg)](https://coveralls.io/r/mozillazg/go-cos?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/mozillazg/go-cos)](https://goreportcard.com/report/github.com/mozillazg/go-cos) -[![GoDoc](https://godoc.org/github.com/mozillazg/go-cos?status.svg)](https://godoc.org/github.com/mozillazg/go-cos) - -## Install - -`go get -u github.com/mozillazg/go-cos` - - -## Usage - -```go -package main - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - - "github.com/mozillazg/go-cos" -) - -func main() { - b, _ := cos.NewBaseURL("https://-.cos..myqcloud.com") - c := cos.NewClient(b, &http.Client{ - Transport: &cos.AuthorizationTransport{ - SecretID: os.Getenv("COS_SECRETID"), - SecretKey: os.Getenv("COS_SECRETKEY"), - }, - }) - - name := "test/hello.txt" - resp, err := c.Object.Get(context.Background(), name, nil) - if err != nil { - panic(err) - } - bs, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - fmt.Printf("%s\n", string(bs)) -} -``` - -所有的 API 在 [_example](./_example/) 目录下都有对应的使用示例。 - -## TODO - -Service API: - -* [x] Get Service(使用示例:[service/get.go](./_example/service/get.go)) - -Bucket API: - -* [x] **Get Bucket**(搜索文件,使用示例:[bucket/get.go](./_example/bucket/get.go)) -* [x] Get Bucket ACL(使用示例:[bucket/getACL.go](./_example/bucket/getACL.go)) -* [x] Get Bucket CORS(使用示例:[bucket/getCORS.go](./_example/bucket/getCORS.go)) -* [x] Get Bucket Location(使用示例:[bucket/getLocation.go](./_example/bucket/getLocation.go)) -* [x] Get Buket Lifecycle(使用示例:[bucket/getLifecycle.go](./_example/bucket/getLifecycle.go)) -* [x] Get Bucket Tagging(使用示例:[bucket/getTagging.go](./_example/bucket/getTagging.go)) -* [x] Put Bucket(创建 bucket,使用示例:[bucket/put.go](./_example/bucket/put.go)) -* [x] Put Bucket ACL(使用示例:[bucket/putACL.go](./_example/bucket/putACL.go)) -* [x] Put Bucket CORS(使用示例:[bucket/putCORS.go](./_example/bucket/putCORS.go)) -* [x] Put Bucket Lifecycle(使用示例:[bucket/putLifecycle.go](./_example/bucket/putLifecycle.go)) -* [x] Put Bucket Tagging(使用示例:[bucket/putTagging.go](./_example/bucket/putTagging.go)) -* [x] Delete Bucket(删除 bucket,使用示例:[bucket/delete.go](./_example/bucket/delete.go)) -* [x] Delete Bucket CORS(使用示例:[bucket/deleteCORS.go](./_example/bucket/deleteCORS.go)) -* [x] Delete Bucket Lifecycle(使用示例:[bucket/deleteLifecycle.go](./_example/bucket/deleteLifecycle.go)) -* [x] Delete Bucket Tagging(使用示例:[bucket/deleteTagging.go](./_example/bucket/deleteTagging.go)) -* [x] Head Bucket(使用示例:[bucket/head.go](./_example/bucket/head.go)) -* [x] List Multipart Uploads(查询上传的分块,使用示例:[bucket/listMultipartUploads.go](./_example/bucket/listMultipartUploads.go)) - -Object API: - -* [x] **Append Object**(增量更新文件,使用示例:[object/append.go](./_example/object/append.go)) -* [x] **Get Object**(下载文件,使用示例:[object/get.go](./_example/object/get.go)) -* [x] Get Object ACL(使用示例:[object/getACL.go](./_example/object/getACL.go)) -* [x] **Put Object**(上传文件,使用示例:[object/put.go](./_example/object/put.go)) -* 
[x] Put Object ACL(使用示例:[object/putACL.go](./_example/object/putACL.go)) -* [x] Put Object Copy(使用示例:[object/copy.go](./_example/object/copy.go)) -* [x] **Delete Object**(删除文件,使用示例:[object/delete.go](./_example/object/delete.go)) -* [x] Delete Multiple Object(使用示例:[object/deleteMultiple.go](./_example/object/deleteMultiple.go)) -* [x] Head Object(使用示例:[object/head.go](./_example/object/head.go)) -* [x] Options Object(使用示例:[object/options.go](./_example/object/options.go)) -* [x] **Initiate Multipart Upload**(初始化分块上传,使用示例:[object/initiateMultipartUpload.go](./_example/object/initiateMultipartUpload.go)) -* [x] **Upload Part**(上传一个分块,使用示例:[object/uploadPart.go](./_example/object/uploadPart.go)) -* [x] **List Parts**(列出已上传的分块,使用示例:[object/listParts.go](./_example/object/listParts.go)) -* [x] **Complete Multipart Upload**(合并上传的分块,使用示例:[object/completeMultipartUpload.go](./_example/object/completeMultipartUpload.go)) -* [x] **Abort Multipart Upload**(取消分块上传,使用示例:[object/abortMultipartUpload.go](./_example/object/abortMultipartUpload.go)) - -其他功能: - -* [x] **生成预签名授权 URL** - * [x] 通过预签名授权 URL 下载文件,示例:[object/getWithPresignedURL.go](./_example/object/getWithPresignedURL.go) - * [x] 通过预签名授权 URL 上传文件,示例:[object/putWithPresignedURL.go](./_example/object/putWithPresignedURL.go) diff --git a/vendor/github.com/mozillazg/go-cos/auth.go b/vendor/github.com/mozillazg/go-cos/auth.go deleted file mode 100644 index 3390d31..0000000 --- a/vendor/github.com/mozillazg/go-cos/auth.go +++ /dev/null @@ -1,253 +0,0 @@ -package cos - -import ( - "crypto/hmac" - "crypto/sha1" - "fmt" - "hash" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -const sha1SignAlgorithm = "sha1" -const privateHeaderPrefix = "x-cos-" -const defaultAuthExpire = time.Hour - -// 需要校验的 Headers 列表 -var needSignHeaders = map[string]bool{ - "host": true, - "range": true, - "x-cos-acl": true, - "x-cos-grant-read": true, - "x-cos-grant-write": true, - "x-cos-grant-full-control": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, - // "content-type": true, - "content-length": true, - "content-md5": true, - "expect": true, - "expires": true, - "x-cos-content-sha1": true, - "x-cos-storage-class": true, - "if-modified-since": true, - "origin": true, - "access-control-request-method": true, - "access-control-request-headers": true, - "x-cos-object-type": true, -} - -// AuthTime 用于生成签名所需的 q-sign-time 和 q-key-time 相关参数 -type AuthTime struct { - SignStartTime time.Time - SignEndTime time.Time - KeyStartTime time.Time - KeyEndTime time.Time -} - -// NewAuthTime 生成 AuthTime 的便捷函数 -// -// expire: 从现在开始多久过期. 
-func NewAuthTime(expire time.Duration) *AuthTime { - if expire == time.Duration(0) { - expire = defaultAuthExpire - } - signStartTime := time.Now() - keyStartTime := signStartTime - signEndTime := signStartTime.Add(expire) - keyEndTime := signEndTime - return &AuthTime{ - SignStartTime: signStartTime, - SignEndTime: signEndTime, - KeyStartTime: keyStartTime, - KeyEndTime: keyEndTime, - } -} - -// signString return q-sign-time string -func (a *AuthTime) signString() string { - return fmt.Sprintf("%d;%d", a.SignStartTime.Unix(), a.SignEndTime.Unix()) -} - -// keyString return q-key-time string -func (a *AuthTime) keyString() string { - return fmt.Sprintf("%d;%d", a.KeyStartTime.Unix(), a.KeyEndTime.Unix()) -} - -// newAuthorization 通过一系列步骤生成最终需要的 Authorization 字符串 -func newAuthorization(auth Auth, req *http.Request, authTime AuthTime) string { - secretKey := auth.SecretKey - secretID := auth.SecretID - signTime := authTime.signString() - keyTime := authTime.keyString() - signKey := calSignKey(secretKey, keyTime) - - formatHeaders, signedHeaderList := genFormatHeaders(req.Header) - formatParameters, signedParameterList := genFormatParameters(req.URL.Query()) - formatString := genFormatString(req.Method, *req.URL, formatParameters, formatHeaders) - - stringToSign := calStringToSign(sha1SignAlgorithm, keyTime, formatString) - signature := calSignature(signKey, stringToSign) - - return genAuthorization( - secretID, signTime, keyTime, signature, signedHeaderList, - signedParameterList, - ) -} - -// AddAuthorizationHeader 给 req 增加签名信息 -func AddAuthorizationHeader(secretID, secretKey string, req *http.Request, authTime *AuthTime) { - auth := newAuthorization(Auth{ - SecretID: secretID, - SecretKey: secretKey, - }, req, *authTime) - req.Header.Set("Authorization", auth) -} - -// calSignKey 计算 SignKey -func calSignKey(secretKey, keyTime string) string { - digest := calHMACDigest(secretKey, keyTime, sha1SignAlgorithm) - return fmt.Sprintf("%x", digest) -} - -// calStringToSign 计算 StringToSign -func calStringToSign(signAlgorithm, signTime, formatString string) string { - h := sha1.New() - h.Write([]byte(formatString)) - return fmt.Sprintf("%s\n%s\n%x\n", signAlgorithm, signTime, h.Sum(nil)) -} - -// calSignature 计算 Signature -func calSignature(signKey, stringToSign string) string { - digest := calHMACDigest(signKey, stringToSign, sha1SignAlgorithm) - return fmt.Sprintf("%x", digest) -} - -// genAuthorization 生成 Authorization -func genAuthorization(secretID, signTime, keyTime, signature string, signedHeaderList, signedParameterList []string) string { - return strings.Join([]string{ - "q-sign-algorithm=" + sha1SignAlgorithm, - "q-ak=" + secretID, - "q-sign-time=" + signTime, - "q-key-time=" + keyTime, - "q-header-list=" + strings.Join(signedHeaderList, ";"), - "q-url-param-list=" + strings.Join(signedParameterList, ";"), - "q-signature=" + signature, - }, "&") -} - -// genFormatString 生成 FormatString -func genFormatString(method string, uri url.URL, formatParameters, formatHeaders string) string { - formatMethod := strings.ToLower(method) - formatURI := uri.Path - - return fmt.Sprintf("%s\n%s\n%s\n%s\n", formatMethod, formatURI, - formatParameters, formatHeaders, - ) -} - -// genFormatParameters 生成 FormatParameters 和 SignedParameterList -func genFormatParameters(parameters url.Values) (formatParameters string, signedParameterList []string) { - ps := url.Values{} - for key, values := range parameters { - for _, value := range values { - key = strings.ToLower(key) - ps.Add(key, value) - 
signedParameterList = append(signedParameterList, key) - } - } - //formatParameters = strings.ToLower(ps.Encode()) - formatParameters = ps.Encode() - sort.Strings(signedParameterList) - return -} - -// genFormatHeaders 生成 FormatHeaders 和 SignedHeaderList -func genFormatHeaders(headers http.Header) (formatHeaders string, signedHeaderList []string) { - hs := url.Values{} - for key, values := range headers { - for _, value := range values { - key = strings.ToLower(key) - if isSignHeader(key) { - hs.Add(key, value) - signedHeaderList = append(signedHeaderList, key) - } - } - } - formatHeaders = hs.Encode() - sort.Strings(signedHeaderList) - return -} - -// HMAC 签名 -func calHMACDigest(key, msg, signMethod string) []byte { - var hashFunc func() hash.Hash - switch signMethod { - case "sha1": - hashFunc = sha1.New - default: - hashFunc = sha1.New - } - h := hmac.New(hashFunc, []byte(key)) - h.Write([]byte(msg)) - return h.Sum(nil) -} - -func isSignHeader(key string) bool { - for k, v := range needSignHeaders { - if key == k && v { - return true - } - } - return strings.HasPrefix(key, privateHeaderPrefix) -} - -// Auth 签名相关的认证信息 -type Auth struct { - SecretID string - SecretKey string - // 签名多久过期,默认是 time.Hour - Expire time.Duration -} - -// AuthorizationTransport 给请求增加 Authorization header -type AuthorizationTransport struct { - SecretID string - SecretKey string - // 签名多久过期,默认是 time.Hour - Expire time.Duration - - Transport http.RoundTripper -} - -// RoundTrip implements the RoundTripper interface. -func (t *AuthorizationTransport) RoundTrip(req *http.Request) (*http.Response, error) { - // 使用预签名授权 URL 时跳过添加 Authorization header 的步骤 - if req.URL.Query().Get("sign") == "" { - req = cloneRequest(req) // per RoundTrip contract - - // 增加 Authorization header - authTime := NewAuthTime(t.Expire) - AddAuthorizationHeader(t.SecretID, t.SecretKey, req, authTime) - } - - resp, err := t.transport().RoundTrip(req) - return resp, err -} - -func (t *AuthorizationTransport) transport() http.RoundTripper { - if t.Transport != nil { - return t.Transport - } - return http.DefaultTransport -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket.go b/vendor/github.com/mozillazg/go-cos/bucket.go deleted file mode 100644 index cebd6b0..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket.go +++ /dev/null @@ -1,107 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// BucketService ... -// -// Bucket 相关 API -type BucketService service - -// BucketGetResult ... -type BucketGetResult struct { - XMLName xml.Name `xml:"ListBucketResult"` - Name string - Prefix string `xml:"Prefix,omitempty"` - Marker string `xml:"Marker,omitempty"` - NextMarker string `xml:"NextMarker,omitempty"` - Delimiter string `xml:"Delimiter,omitempty"` - MaxKeys int - IsTruncated bool - Contents []Object `xml:"Contents,omitempty"` - CommonPrefixes []string `xml:"CommonPrefixes>Prefix,omitempty"` - EncodingType string `xml:"Encoding-Type,omitempty"` -} - -// BucketGetOptions ... 
-type BucketGetOptions struct { - Prefix string `url:"prefix,omitempty"` - Delimiter string `url:"delimiter,omitempty"` - EncodingType string `url:"encoding-type,omitempty"` - Marker string `url:"marker,omitempty"` - MaxKeys int `url:"max-keys,omitempty"` -} - -// Get Bucket请求等同于 List Object请求,可以列出该Bucket下部分或者所有Object,发起该请求需要拥有Read权限。 -// -// https://www.qcloud.com/document/product/436/7734 -func (s *BucketService) Get(ctx context.Context, opt *BucketGetOptions) (*BucketGetResult, *Response, error) { - var res BucketGetResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/", - method: http.MethodGet, - optQuery: opt, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// BucketPutOptions ... -type BucketPutOptions ACLHeaderOptions - -// Put Bucket请求可以在指定账号下创建一个Bucket。 -// -// https://www.qcloud.com/document/product/436/7738 -func (s *BucketService) Put(ctx context.Context, opt *BucketPutOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/", - method: http.MethodPut, - optHeader: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// Delete Bucket请求可以在指定账号下删除Bucket,删除之前要求Bucket为空。 -// -// https://www.qcloud.com/document/product/436/7732 -func (s *BucketService) Delete(ctx context.Context) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/", - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// Head Bucket请求可以确认是否存在该Bucket,是否有权限访问,Head的权限与Read一致。 -// -// 当其存在时,返回 HTTP 状态码200; -// 当无权限时,返回 HTTP 状态码403; -// 当不存在时,返回 HTTP 状态码404。 -// -// https://www.qcloud.com/document/product/436/7735 -func (s *BucketService) Head(ctx context.Context) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/", - method: http.MethodHead, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// Bucket ... -type Bucket struct { - Name string - AppID string `xml:",omitempty"` - Region string `xml:"Location,omitempty"` - CreateDate string `xml:",omitempty"` -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_acl.go b/vendor/github.com/mozillazg/go-cos/bucket_acl.go deleted file mode 100644 index 923313f..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_acl.go +++ /dev/null @@ -1,62 +0,0 @@ -package cos - -import ( - "context" - "net/http" -) - -// BucketGetACLResult ... -type BucketGetACLResult ACLXml - -// GetACL 使用API读取Bucket的ACL表,只有所有者有权操作。 -// -// https://www.qcloud.com/document/product/436/7733 -func (s *BucketService) GetACL(ctx context.Context) (*BucketGetACLResult, *Response, error) { - var res BucketGetACLResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?acl", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// BucketPutACLOptions ... 
-type BucketPutACLOptions struct { - Header *ACLHeaderOptions `url:"-" xml:"-"` - Body *ACLXml `url:"-" header:"-"` -} - -// PutACL 使用API写入Bucket的ACL表,您可以通过Header:"x-cos-acl","x-cos-grant-read", -// "x-cos-grant-write","x-cos-grant-full-control"传入ACL信息,也可以通过body以XML格式传入ACL信息, -// -// 但是只能选择Header和Body其中一种,否则返回冲突。 -// -// Put Bucket ACL是一个覆盖操作,传入新的ACL将覆盖原有ACL。只有所有者有权操作。 -// -// "x-cos-acl":枚举值为public-read,private;public-read意味这个Bucket有公有读私有写的权限, -// private意味这个Bucket有私有读写的权限。 -// -// "x-cos-grant-read":意味被赋予权限的用户拥有该Bucket的读权限 -// "x-cos-grant-write":意味被赋予权限的用户拥有该Bucket的写权限 -// "x-cos-grant-full-control":意味被赋予权限的用户拥有该Bucket的读写权限 -// -// https://www.qcloud.com/document/product/436/7737 -func (s *BucketService) PutACL(ctx context.Context, opt *BucketPutACLOptions) (*Response, error) { - header := opt.Header - body := opt.Body - if body != nil { - header = nil - } - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?acl", - method: http.MethodPut, - body: body, - optHeader: header, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_cors.go b/vendor/github.com/mozillazg/go-cos/bucket_cors.go deleted file mode 100644 index 80da684..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_cors.go +++ /dev/null @@ -1,77 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// BucketCORSRule ... -type BucketCORSRule struct { - ID string `xml:"ID,omitempty"` - AllowedMethods []string `xml:"AllowedMethod"` - AllowedOrigins []string `xml:"AllowedOrigin"` - AllowedHeaders []string `xml:"AllowedHeader,omitempty"` - MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"` - ExposeHeaders []string `xml:"ExposeHeader,omitempty"` -} - -// BucketGetCORSResult ... -type BucketGetCORSResult struct { - XMLName xml.Name `xml:"CORSConfiguration"` - Rules []BucketCORSRule `xml:"CORSRule,omitempty"` -} - -// GetCORS ... -// -// Get Bucket CORS实现跨域访问配置读取。 -// -// https://www.qcloud.com/document/product/436/8274 -func (s *BucketService) GetCORS(ctx context.Context) (*BucketGetCORSResult, *Response, error) { - var res BucketGetCORSResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?cors", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// BucketPutCORSOptions ... -type BucketPutCORSOptions struct { - XMLName xml.Name `xml:"CORSConfiguration"` - Rules []BucketCORSRule `xml:"CORSRule,omitempty"` -} - -// PutCORS ... -// -// Put Bucket CORS实现跨域访问设置,您可以通过传入XML格式的配置文件实现配置,文件大小限制为64 KB。 -// -// https://www.qcloud.com/document/product/436/8279 -func (s *BucketService) PutCORS(ctx context.Context, opt *BucketPutCORSOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?cors", - method: http.MethodPut, - body: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// DeleteCORS ... 
-// -// Delete Bucket CORS实现跨域访问配置删除。 -// -// https://www.qcloud.com/document/product/436/8283 -func (s *BucketService) DeleteCORS(ctx context.Context) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?cors", - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_lifecycle.go b/vendor/github.com/mozillazg/go-cos/bucket_lifecycle.go deleted file mode 100644 index 8631cec..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_lifecycle.go +++ /dev/null @@ -1,103 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// BucketLifecycleExpiration ... -type BucketLifecycleExpiration struct { - Date string `xml:"Date,omitempty"` - Days int `xml:"Days,omitempty"` -} - -// BucketLifecycleTransition ... -type BucketLifecycleTransition struct { - Date string `xml:"Date,omitempty"` - Days int `xml:"Days,omitempty"` - StorageClass string -} - -// BucketLifecycleAbortIncompleteMultipartUpload ... -type BucketLifecycleAbortIncompleteMultipartUpload struct { - DaysAfterInitiation string `xml:"DaysAfterInititation,omitempty"` -} - -// BucketLifecycleRule ... -type BucketLifecycleRule struct { - ID string `xml:"ID,omitempty"` - Prefix string - Status string - Transition *BucketLifecycleTransition `xml:"Transition,omitempty"` - Expiration *BucketLifecycleExpiration `xml:"Expiration,omitempty"` - AbortIncompleteMultipartUpload *BucketLifecycleAbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"` -} - -// BucketGetLifecycleResult ... -type BucketGetLifecycleResult struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules []BucketLifecycleRule `xml:"Rule,omitempty"` -} - -// GetLifecycle ... -// -// Get Bucket Lifecycle请求实现读取生命周期管理的配置。当配置不存在时,返回404 Not Found。 -// -// (目前只支持华南园区) -// -// https://www.qcloud.com/document/product/436/8278 -func (s *BucketService) GetLifecycle(ctx context.Context) (*BucketGetLifecycleResult, *Response, error) { - var res BucketGetLifecycleResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?lifecycle", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// BucketPutLifecycleOptions ... -type BucketPutLifecycleOptions struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules []BucketLifecycleRule `xml:"Rule,omitempty"` -} - -// PutLifecycle ... -// -// Put Bucket Lifecycle请求实现设置生命周期管理的功能。您可以通过该请求实现数据的生命周期管理配置和定期删除。 -// -// 此请求为覆盖操作,上传新的配置文件将覆盖之前的配置文件。生命周期管理对文件和文件夹同时生效。 -// -// (目前只支持华南园区) -// -// https://www.qcloud.com/document/product/436/8280 -func (s *BucketService) PutLifecycle(ctx context.Context, opt *BucketPutLifecycleOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?lifecycle", - method: http.MethodPut, - body: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// DeleteLifecycle ... 
-// -// Delete Bucket Lifecycle请求实现删除生命周期管理。 -// -// (目前只支持华南园区) -// -// https://www.qcloud.com/document/product/436/8284 -func (s *BucketService) DeleteLifecycle(ctx context.Context) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?lifecycle", - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_location.go b/vendor/github.com/mozillazg/go-cos/bucket_location.go deleted file mode 100644 index 1875307..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_location.go +++ /dev/null @@ -1,30 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// BucketGetLocationResult ... -type BucketGetLocationResult struct { - XMLName xml.Name `xml:"LocationConstraint"` - Location string `xml:",chardata"` -} - -// GetLocation ... -// -// Get Bucket Location接口获取Bucket所在地域信息,只有Bucket所有者有权限读取信息。 -// -// https://www.qcloud.com/document/product/436/8275 -func (s *BucketService) GetLocation(ctx context.Context) (*BucketGetLocationResult, *Response, error) { - var res BucketGetLocationResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?location", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_part.go b/vendor/github.com/mozillazg/go-cos/bucket_part.go deleted file mode 100644 index 850a93e..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_part.go +++ /dev/null @@ -1,59 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// ListMultipartUploadsResult ... -type ListMultipartUploadsResult struct { - XMLName xml.Name `xml:"ListMultipartUploadsResult"` - Bucket string `xml:"Bucket"` - EncodingType string `xml:"Encoding-Type"` - KeyMarker string - UploadIDMarker string `xml:"UploadIdMarker"` - NextKeyMarker string - NextUploadIDMarker string `xml:"NextUploadIdMarker"` - MaxUploads int - IsTruncated bool - Uploads []struct { - Key string - UploadID string `xml:"UploadId"` - StorageClass string - Initiator *Initiator - Owner *Owner - Initiated string - } `xml:"Upload,omitempty"` - Prefix string - Delimiter string `xml:"delimiter,omitempty"` - CommonPrefixes []string `xml:"CommonPrefixs>Prefix,omitempty"` -} - -// ListMultipartUploadsOptions ... -type ListMultipartUploadsOptions struct { - Delimiter string `url:"delimiter,omitempty"` - EncodingType string `url:"encoding-type,omitempty"` - Prefix string `url:"prefix,omitempty"` - MaxUploads int `url:"max-uploads,omitempty"` - KeyMarker string `url:"key-marker,omitempty"` - UploadIDMarker string `url:"upload-id-marker,omitempty"` -} - -// ListMultipartUploads ... 
-// -// List Multipart Uploads用来查询正在进行中的分块上传。单次最多列出1000个正在进行中的分块上传。 -// -// https://www.qcloud.com/document/product/436/7736 -func (s *BucketService) ListMultipartUploads(ctx context.Context, opt *ListMultipartUploadsOptions) (*ListMultipartUploadsResult, *Response, error) { - var res ListMultipartUploadsResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?uploads", - method: http.MethodGet, - result: &res, - optQuery: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/bucket_tagging.go b/vendor/github.com/mozillazg/go-cos/bucket_tagging.go deleted file mode 100644 index d0845d3..0000000 --- a/vendor/github.com/mozillazg/go-cos/bucket_tagging.go +++ /dev/null @@ -1,75 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// BucketTaggingTag ... -type BucketTaggingTag struct { - Key string - Value string -} - -// BucketGetTaggingResult ... -type BucketGetTaggingResult struct { - XMLName xml.Name `xml:"Tagging"` - TagSet []BucketTaggingTag `xml:"TagSet>Tag,omitempty"` -} - -// GetTagging ... -// -// Get Bucket Tagging接口实现获取指定Bucket的标签。 -// -// https://www.qcloud.com/document/product/436/8277 -func (s *BucketService) GetTagging(ctx context.Context) (*BucketGetTaggingResult, *Response, error) { - var res BucketGetTaggingResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?tagging", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// BucketPutTaggingOptions ... -type BucketPutTaggingOptions struct { - XMLName xml.Name `xml:"Tagging"` - TagSet []BucketTaggingTag `xml:"TagSet>Tag,omitempty"` -} - -// PutTagging ... -// -// Put Bucket Tagging接口实现给用指定Bucket打标签。用来组织和管理相关Bucket。 -// -// 当该请求设置相同Key名称,不同Value时,会返回400。请求成功,则返回204。 -// -// https://www.qcloud.com/document/product/436/8281 -func (s *BucketService) PutTagging(ctx context.Context, opt *BucketPutTaggingOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?tagging", - method: http.MethodPut, - body: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// DeleteTagging ... -// -// Delete Bucket Tagging接口实现删除指定Bucket的标签。 -// -// https://www.qcloud.com/document/product/436/8286 -func (s *BucketService) DeleteTagging(ctx context.Context) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?tagging", - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/cos.go b/vendor/github.com/mozillazg/go-cos/cos.go deleted file mode 100644 index 3658656..0000000 --- a/vendor/github.com/mozillazg/go-cos/cos.go +++ /dev/null @@ -1,378 +0,0 @@ -package cos - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "text/template" - - "strconv" - - "github.com/google/go-querystring/query" - "github.com/mozillazg/go-httpheader" -) - -const ( - // Version ... 
- Version = "0.9.0" - userAgent = "go-cos/" + Version - contentTypeXML = "application/xml" - defaultServiceBaseURL = "https://service.cos.myqcloud.com" -) - -var bucketURLTemplate = template.Must( - template.New("bucketURLFormat").Parse( - "{{.Scheme}}://{{.BucketName}}-{{.AppID}}.cos.{{.Region}}.myqcloud.com", - ), -) - -// BaseURL 访问各 API 所需的基础 URL -type BaseURL struct { - // 访问 bucket, object 相关 API 的基础 URL(不包含 path 部分) - // 比如:https://test-1253846586.cos.ap-beijing.myqcloud.com - // 详见 https://cloud.tencent.com/document/product/436/6224 - BucketURL *url.URL - // 访问 service API 的基础 URL(不包含 path 部分) - // 比如:https://service.cos.myqcloud.com - ServiceURL *url.URL -} - -// NewBaseURL 生成 BaseURL -func NewBaseURL(bucketURL string) (u *BaseURL, err error) { - bu, err := url.Parse(bucketURL) - if err != nil { - return - } - su, _ := url.Parse(defaultServiceBaseURL) - u = &BaseURL{ - BucketURL: bu, - ServiceURL: su, - } - return -} - -// NewBucketURL 生成 BaseURL 所需的 BucketURL -// -// bucketName: bucket 名称 -// AppID: 应用 ID -// Region: 区域代码,详见 https://cloud.tencent.com/document/product/436/6224 -// secure: 是否使用 https -func NewBucketURL(bucketName, appID, region string, secure bool) *url.URL { - scheme := "https" - if !secure { - scheme = "http" - } - - w := bytes.NewBuffer(nil) - bucketURLTemplate.Execute(w, struct { - Scheme string - BucketName string - AppID string - Region string - }{ - scheme, bucketName, appID, region, - }) - - u, _ := url.Parse(w.String()) - return u -} - -// A Client manages communication with the COS API. -type Client struct { - client *http.Client - - UserAgent string - BaseURL *BaseURL - - common service - - Service *ServiceService - Bucket *BucketService - Object *ObjectService -} - -type service struct { - client *Client -} - -// NewClient returns a new COS API client. 
-func NewClient(uri *BaseURL, httpClient *http.Client) *Client { - if httpClient == nil { - httpClient = &http.Client{} - } - - baseURL := &BaseURL{} - if uri != nil { - baseURL.BucketURL = uri.BucketURL - baseURL.ServiceURL = uri.ServiceURL - } - if baseURL.ServiceURL == nil { - baseURL.ServiceURL, _ = url.Parse(defaultServiceBaseURL) - } - - c := &Client{ - client: httpClient, - UserAgent: userAgent, - BaseURL: baseURL, - } - c.common.client = c - c.Service = (*ServiceService)(&c.common) - c.Bucket = (*BucketService)(&c.common) - c.Object = (*ObjectService)(&c.common) - return c -} - -func (c *Client) newRequest(ctx context.Context, opt *sendOptions) (req *http.Request, err error) { - baseURL := opt.baseURL - uri := opt.uri - method := opt.method - body := opt.body - optQuery := opt.optQuery - optHeader := opt.optHeader - - uri, err = addURLOptions(uri, optQuery) - if err != nil { - return - } - u, _ := url.Parse(uri) - urlStr := baseURL.ResolveReference(u).String() - - var reader io.Reader - contentType := "" - contentMD5 := "" - xsha1 := "" - if body != nil { - // 上传文件 - if r, ok := body.(io.Reader); ok { - reader = r - } else { - b, err := xml.Marshal(body) - if err != nil { - return nil, err - } - contentType = contentTypeXML - reader = bytes.NewReader(b) - contentMD5 = base64.StdEncoding.EncodeToString(calMD5Digest(b)) - //xsha1 = base64.StdEncoding.EncodeToString(calSHA1Digest(b)) - } - } else { - contentType = contentTypeXML - } - - req, err = http.NewRequest(method, urlStr, reader) - if err != nil { - return - } - - req.Header, err = addHeaderOptions(req.Header, optHeader) - if err != nil { - return - } - if v := req.Header.Get("Content-Length"); req.ContentLength == 0 && v != "" && v != "0" { - req.ContentLength, _ = strconv.ParseInt(v, 10, 64) - } - - if contentMD5 != "" { - req.Header["Content-MD5"] = []string{contentMD5} - } - if xsha1 != "" { - req.Header.Set("x-cos-sha1", xsha1) - } - if c.UserAgent != "" { - req.Header.Set("User-Agent", c.UserAgent) - } - if req.Header.Get("Content-Type") == "" && contentType != "" { - req.Header.Set("Content-Type", contentType) - } - return -} - -func (c *Client) doAPI(ctx context.Context, req *http.Request, result interface{}, closeBody bool) (*Response, error) { - req = req.WithContext(ctx) - - resp, err := c.client.Do(req) - if err != nil { - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. 
- select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - return nil, err - } - - defer func() { - if closeBody { - // Close the body to let the Transport reuse the connection - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } - }() - - response := newResponse(resp) - - err = checkResponse(resp) - if err != nil { - // even though there was an error, we still return the response - // in case the caller wants to inspect it further - return response, err - } - - if result != nil { - if w, ok := result.(io.Writer); ok { - io.Copy(w, resp.Body) - } else { - err = xml.NewDecoder(resp.Body).Decode(result) - if err == io.EOF { - err = nil // ignore EOF errors caused by empty response body - } - } - } - - return response, err -} - -type sendOptions struct { - // 基础 URL - baseURL *url.URL - // URL 中除基础 URL 外的剩余部分 - uri string - // 请求方法 - method string - - body interface{} - // url 查询参数 - optQuery interface{} - // http header 参数 - optHeader interface{} - // 用 result 反序列化 resp.Body - result interface{} - // 是否禁用自动调用 resp.Body.Close() - // 自动调用 Close() 是为了能够重用连接 - disableCloseBody bool -} - -func (c *Client) send(ctx context.Context, opt *sendOptions) (resp *Response, err error) { - req, err := c.newRequest(ctx, opt) - if err != nil { - return - } - - resp, err = c.doAPI(ctx, req, opt.result, !opt.disableCloseBody) - if err != nil { - return - } - return -} - -// addURLOptions adds the parameters in opt as URL query parameters to s. opt -// must be a struct whose fields may contain "url" tags. -func addURLOptions(s string, opt interface{}) (string, error) { - v := reflect.ValueOf(opt) - if v.Kind() == reflect.Ptr && v.IsNil() { - return s, nil - } - - u, err := url.Parse(s) - if err != nil { - return s, err - } - - qs, err := query.Values(opt) - if err != nil { - return s, err - } - - // 保留原有的参数,并且放在前面。因为 cos 的 url 路由是以第一个参数作为路由的 - // e.g. /?uploads - q := u.RawQuery - rq := qs.Encode() - if q != "" { - if rq != "" { - u.RawQuery = fmt.Sprintf("%s&%s", q, qs.Encode()) - } - } else { - u.RawQuery = rq - } - return u.String(), nil -} - -// addHeaderOptions adds the parameters in opt as Header fields to req. opt -// must be a struct whose fields may contain "header" tags. -func addHeaderOptions(header http.Header, opt interface{}) (http.Header, error) { - v := reflect.ValueOf(opt) - if v.Kind() == reflect.Ptr && v.IsNil() { - return header, nil - } - - h, err := httpheader.Header(opt) - if err != nil { - return nil, err - } - - for key, values := range h { - for _, value := range values { - header.Add(key, value) - } - } - return header, nil -} - -// Owner ... -type Owner struct { - UIN string `xml:"uin,omitempty"` - ID string `xml:",omitempty"` - DisplayName string `xml:",omitempty"` -} - -// Initiator ... -type Initiator Owner - -// Response API 响应 -type Response struct { - *http.Response -} - -func newResponse(resp *http.Response) *Response { - return &Response{ - Response: resp, - } -} - -// ACLHeaderOptions ... -type ACLHeaderOptions struct { - XCosACL string `header:"x-cos-acl,omitempty" url:"-" xml:"-"` - XCosGrantRead string `header:"x-cos-grant-read,omitempty" url:"-" xml:"-"` - XCosGrantWrite string `header:"x-cos-grant-write,omitempty" url:"-" xml:"-"` - XCosGrantFullControl string `header:"x-cos-grant-full-control,omitempty" url:"-" xml:"-"` -} - -// ACLGrantee ... 
-type ACLGrantee struct { - Type string `xml:"type,attr"` - UIN string `xml:"uin,omitempty"` - ID string `xml:",omitempty"` - DisplayName string `xml:",omitempty"` - SubAccount string `xml:"Subaccount,omitempty"` -} - -// ACLGrant ... -type ACLGrant struct { - Grantee *ACLGrantee - Permission string -} - -// ACLXml ... -type ACLXml struct { - XMLName xml.Name `xml:"AccessControlPolicy"` - Owner *Owner - AccessControlList []ACLGrant `xml:"AccessControlList>Grant,omitempty"` -} diff --git a/vendor/github.com/mozillazg/go-cos/doc.go b/vendor/github.com/mozillazg/go-cos/doc.go deleted file mode 100644 index 54b415e..0000000 --- a/vendor/github.com/mozillazg/go-cos/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Package cos 腾讯云对象存储服务 COS(Cloud Object Storage) Go SDK。 - - -COS API Version - -封装了 V5 版本的 XML API 。 - - -Usage - -在项目的 _example 目录下有各个 API 的使用示例 。 - - -Authentication - -默认所有 API 都是匿名访问. 如果想添加认证信息的话,可以通过自定义一个 http.Client 来添加认证信息. - -比如, 使用内置的 AuthorizationTransport 来为请求增加 Authorization Header 签名信息: - - client := cos.NewClient(b, &http.Client{ - Transport: &cos.AuthorizationTransport{ - SecretID: "COS_SECRETID", - SecretKey: "COS_SECRETKEY", - }, - }) - -*/ -package cos diff --git a/vendor/github.com/mozillazg/go-cos/error.go b/vendor/github.com/mozillazg/go-cos/error.go deleted file mode 100644 index 0c284aa..0000000 --- a/vendor/github.com/mozillazg/go-cos/error.go +++ /dev/null @@ -1,41 +0,0 @@ -package cos - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "net/http" -) - -// ErrorResponse 包含 API 返回的错误信息 -// -// https://www.qcloud.com/document/product/436/7730 -type ErrorResponse struct { - XMLName xml.Name `xml:"Error"` - Response *http.Response `xml:"-"` - Code string - Message string - Resource string - RequestID string `xml:"RequestId"` - TraceID string `xml:"TraceId,omitempty"` -} - -// Error ... -func (r *ErrorResponse) Error() string { - return fmt.Sprintf("%v %v: %d %v(Message: %v, RequestId: %v, TraceId: %v)", - r.Response.Request.Method, r.Response.Request.URL, - r.Response.StatusCode, r.Code, r.Message, r.RequestID, r.TraceID) -} - -// 检查 response 是否是出错时的返回的 response -func checkResponse(r *http.Response) error { - if c := r.StatusCode; 200 <= c && c <= 299 { - return nil - } - errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) - if err == nil && data != nil { - xml.Unmarshal(data, errorResponse) - } - return errorResponse -} diff --git a/vendor/github.com/mozillazg/go-cos/helper.go b/vendor/github.com/mozillazg/go-cos/helper.go deleted file mode 100644 index d947ede..0000000 --- a/vendor/github.com/mozillazg/go-cos/helper.go +++ /dev/null @@ -1,85 +0,0 @@ -package cos - -import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "fmt" - "net/http" -) - -// 计算 md5 或 sha1 时的分块大小 -const calDigestBlockSize = 1024 * 1024 * 10 - -func calMD5Digest(msg []byte) []byte { - // TODO: 分块计算,减少内存消耗 - m := md5.New() - m.Write(msg) - return m.Sum(nil) -} - -func calSHA1Digest(msg []byte) []byte { - // TODO: 分块计算,减少内存消耗 - m := sha1.New() - m.Write(msg) - return m.Sum(nil) -} - -// cloneRequest returns a clone of the provided *http.Request. The clone is a -// shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - return r2 -} - -// encodeURIComponent like same function in javascript -// -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent -// -// http://www.ecma-international.org/ecma-262/6.0/#sec-uri-syntax-and-semantics -func encodeURIComponent(s string) string { - var b bytes.Buffer - written := 0 - - for i, n := 0, len(s); i < n; i++ { - c := s[i] - - switch c { - case '-', '_', '.', '!', '~', '*', '\'', '(', ')': - continue - default: - // Unreserved according to RFC 3986 sec 2.3 - if 'a' <= c && c <= 'z' { - - continue - - } - if 'A' <= c && c <= 'Z' { - - continue - - } - if '0' <= c && c <= '9' { - - continue - } - } - - b.WriteString(s[written:i]) - fmt.Fprintf(&b, "%%%02x", c) - written = i + 1 - } - - if written == 0 { - return s - } - b.WriteString(s[written:]) - return b.String() -} diff --git a/vendor/github.com/mozillazg/go-cos/object.go b/vendor/github.com/mozillazg/go-cos/object.go deleted file mode 100644 index 11f530a..0000000 --- a/vendor/github.com/mozillazg/go-cos/object.go +++ /dev/null @@ -1,339 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" -) - -// ObjectService ... -// -// Object 相关 API -type ObjectService service - -// ObjectGetOptions ... -type ObjectGetOptions struct { - ResponseContentType string `url:"response-content-type,omitempty" header:"-"` - ResponseContentLanguage string `url:"response-content-language,omitempty" header:"-"` - ResponseExpires string `url:"response-expires,omitempty" header:"-"` - ResponseCacheControl string `url:"response-cache-control,omitempty" header:"-"` - ResponseContentDisposition string `url:"response-content-disposition,omitempty" header:"-"` - ResponseContentEncoding string `url:"response-content-encoding,omitempty" header:"-"` - Range string `url:"-" header:"Range,omitempty"` - IfModifiedSince string `url:"-" header:"If-Modified-Since,omitempty"` - - // 预签名授权 URL - PresignedURL *url.URL `header:"-" url:"-" xml:"-"` -} - -// Get Object 请求可以将一个文件(Object)下载至本地。 -// 该操作需要对目标 Object 具有读权限或目标 Object 对所有人都开放了读权限(公有读)。 -// -// https://www.qcloud.com/document/product/436/7753 -func (s *ObjectService) Get(ctx context.Context, name string, opt *ObjectGetOptions) (*Response, error) { - baseURL := s.client.BaseURL.BucketURL - uri := "/" + encodeURIComponent(name) - if opt != nil && opt.PresignedURL != nil { - baseURL = opt.PresignedURL - uri = "" - } - sendOpt := sendOptions{ - baseURL: baseURL, - uri: uri, - method: http.MethodGet, - optQuery: opt, - optHeader: opt, - disableCloseBody: true, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectPutHeaderOptions ... 
-type ObjectPutHeaderOptions struct { - CacheControl string `header:"Cache-Control,omitempty" url:"-"` - ContentDisposition string `header:"Content-Disposition,omitempty" url:"-"` - ContentEncoding string `header:"Content-Encoding,omitempty" url:"-"` - ContentType string `header:"Content-Type,omitempty" url:"-"` - ContentLength int `header:"Content-Length,omitempty" url:"-"` - Expect string `header:"Expect,omitempty" url:"-"` - Expires string `header:"Expires,omitempty" url:"-"` - XCosContentSHA1 string `header:"x-cos-content-sha1,omitempty" url:"-"` - // 自定义的 x-cos-meta-* header - XCosMetaXXX *http.Header `header:"x-cos-meta-*,omitempty" url:"-"` - XCosStorageClass string `header:"x-cos-storage-class,omitempty" url:"-"` - // 可选值: Normal, Appendable - //XCosObjectType string `header:"x-cos-object-type,omitempty" url:"-"` -} - -// ObjectPutOptions ... -type ObjectPutOptions struct { - *ACLHeaderOptions `header:",omitempty" url:"-" xml:"-"` - *ObjectPutHeaderOptions `header:",omitempty" url:"-" xml:"-"` - - // 预签名授权 URL - PresignedURL *url.URL `header:"-" url:"-" xml:"-"` -} - -// Put Object请求可以将一个文件(Oject)上传至指定Bucket。 -// -// 当 r 不是 bytes.Buffer/bytes.Reader/strings.Reader 时,必须指定 opt.ObjectPutHeaderOptions.ContentLength -// -// https://www.qcloud.com/document/product/436/7749 -func (s *ObjectService) Put(ctx context.Context, name string, r io.Reader, opt *ObjectPutOptions) (*Response, error) { - baseURL := s.client.BaseURL.BucketURL - uri := "/" + encodeURIComponent(name) - if opt != nil && opt.PresignedURL != nil { - baseURL = opt.PresignedURL - uri = "" - } - sendOpt := sendOptions{ - baseURL: baseURL, - uri: uri, - method: http.MethodPut, - body: r, - optHeader: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectCopyHeaderOptions ... -type ObjectCopyHeaderOptions struct { - XCosMetadataDirective string `header:"x-cos-metadata-directive,omitempty" url:"-" xml:"-"` - XCosCopySourceIfModifiedSince string `header:"x-cos-copy-source-If-Modified-Since,omitempty" url:"-" xml:"-"` - XCosCopySourceIfUnmodifiedSince string `header:"x-cos-copy-source-If-Unmodified-Since,omitempty" url:"-" xml:"-"` - XCosCopySourceIfMatch string `header:"x-cos-copy-source-If-Match,omitempty" url:"-" xml:"-"` - XCosCopySourceIfNoneMatch string `header:"x-cos-copy-source-If-None-Match,omitempty" url:"-" xml:"-"` - XCosStorageClass string `header:"x-cos-storage-class,omitempty" url:"-" xml:"-"` - // 自定义的 x-cos-meta-* header - XCosMetaXXX *http.Header `header:"x-cos-meta-*,omitempty" url:"-"` - XCosCopySource string `header:"x-cos-copy-source" url:"-" xml:"-"` -} - -// ObjectCopyOptions ... -type ObjectCopyOptions struct { - *ObjectCopyHeaderOptions `header:",omitempty" url:"-" xml:"-"` - *ACLHeaderOptions `header:",omitempty" url:"-" xml:"-"` -} - -// ObjectCopyResult ... -type ObjectCopyResult struct { - XMLName xml.Name `xml:"CopyObjectResult"` - ETag string `xml:"ETag,omitempty"` - LastModified string `xml:"LastModified,omitempty"` -} - -// Copy ... 
-// Put Object Copy 请求实现将一个文件从源路径复制到目标路径。建议文件大小 1M 到 5G, -// 超过 5G 的文件请使用分块上传 Upload - Copy。在拷贝的过程中,文件元属性和 ACL 可以被修改。 -// -// 用户可以通过该接口实现文件移动,文件重命名,修改文件属性和创建副本。 -// -// 注意:在跨帐号复制的时候,需要先设置被复制文件的权限为公有读,或者对目标帐号赋权,同帐号则不需要。 -// -// https://cloud.tencent.com/document/product/436/10881 -func (s *ObjectService) Copy(ctx context.Context, name, sourceURL string, opt *ObjectCopyOptions) (*ObjectCopyResult, *Response, error) { - var res ObjectCopyResult - if opt == nil { - opt = new(ObjectCopyOptions) - } - if opt.ObjectCopyHeaderOptions == nil { - opt.ObjectCopyHeaderOptions = new(ObjectCopyHeaderOptions) - } - opt.XCosCopySource = sourceURL - - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name), - method: http.MethodPut, - body: nil, - optHeader: opt, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// Delete Object请求可以将一个文件(Object)删除。 -// -// https://www.qcloud.com/document/product/436/7743 -func (s *ObjectService) Delete(ctx context.Context, name string) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name), - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectHeadOptions ... -type ObjectHeadOptions struct { - IfModifiedSince string `url:"-" header:"If-Modified-Since,omitempty"` -} - -// Head Object请求可以取回对应Object的元数据,Head的权限与Get的权限一致 -// -// https://www.qcloud.com/document/product/436/7745 -func (s *ObjectService) Head(ctx context.Context, name string, opt *ObjectHeadOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name), - method: http.MethodHead, - optHeader: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectOptionsOptions ... -type ObjectOptionsOptions struct { - Origin string `url:"-" header:"Origin"` - AccessControlRequestMethod string `url:"-" header:"Access-Control-Request-Method"` - AccessControlRequestHeaders string `url:"-" header:"Access-Control-Request-Headers,omitempty"` -} - -// Options Object请求实现跨域访问的预请求。即发出一个 OPTIONS 请求给服务器以确认是否可以进行跨域操作。 -// -// 当CORS配置不存在时,请求返回403 Forbidden。 -// -// https://www.qcloud.com/document/product/436/8288 -func (s *ObjectService) Options(ctx context.Context, name string, opt *ObjectOptionsOptions) (*Response, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name), - method: http.MethodOptions, - optHeader: opt, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// Append ... 
-// -// Append请求可以将一个文件(Object)以分块追加的方式上传至 Bucket 中。使用Append Upload的文件必须事前被设定为Appendable。 -// 当Appendable的文件被执行Put Object的操作以后,文件被覆盖,属性改变为Normal。 -// -// 文件属性可以在Head Object操作中被查询到,当您发起Head Object请求时,会返回自定义Header『x-cos-object-type』,该Header只有两个枚举值:Normal或者Appendable。 -// -// 追加上传建议文件大小1M - 5G。如果position的值和当前Object的长度不致,COS会返回409错误。 -// 如果Append一个Normal的Object,COS会返回409 ObjectNotAppendable。 -// -// Appendable的文件不可以被复制,不参与版本管理,不参与生命周期管理,不可跨区域复制。 -// -// 当 r 不是 bytes.Buffer/bytes.Reader/strings.Reader 时,必须指定 opt.ObjectPutHeaderOptions.ContentLength -// -// https://www.qcloud.com/document/product/436/7741 -func (s *ObjectService) Append(ctx context.Context, name string, position int, r io.Reader, opt *ObjectPutOptions) (*Response, error) { - u := fmt.Sprintf("/%s?append&position=%d", encodeURIComponent(name), position) - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: u, - method: http.MethodPost, - optHeader: opt, - body: r, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectDeleteMultiOptions ... -type ObjectDeleteMultiOptions struct { - XMLName xml.Name `xml:"Delete" header:"-"` - Quiet bool `xml:"Quiet" header:"-"` - Objects []Object `xml:"Object" header:"-"` - //XCosSha1 string `xml:"-" header:"x-cos-sha1"` -} - -// ObjectDeleteMultiResult ... -type ObjectDeleteMultiResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []Object `xml:"Deleted,omitempty"` - Errors []struct { - Key string - Code string - Message string - } `xml:"Error,omitempty"` -} - -// DeleteMulti ... -// -// Delete Multiple Object请求实现批量删除文件,最大支持单次删除1000个文件。 -// 对于返回结果,COS提供Verbose和Quiet两种结果模式。Verbose模式将返回每个Object的删除结果; -// Quiet模式只返回报错的Object信息。 -// -// 此请求必须携带x-cos-sha1用来校验Body的完整性。 -// -// https://www.qcloud.com/document/product/436/8289 -func (s *ObjectService) DeleteMulti(ctx context.Context, opt *ObjectDeleteMultiOptions) (*ObjectDeleteMultiResult, *Response, error) { - var res ObjectDeleteMultiResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/?delete", - method: http.MethodPost, - body: opt, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -type objectPresignedURLTestingOptions struct { - authTime *AuthTime -} - -// PresignedURL 生成预签名授权 URL,可用于无需知道 SecretID 和 SecretKey 就可以上传和下载文件 。 -// -// httpMethod: -// * 下载文件:http.MethodGet -// * 上传文件: http.MethodPut -// -// https://cloud.tencent.com/document/product/436/14116 -// https://cloud.tencent.com/document/product/436/14114 -func (s *ObjectService) PresignedURL(ctx context.Context, httpMethod, name string, auth Auth, opt interface{}) (*url.URL, error) { - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name), - method: httpMethod, - optQuery: opt, - optHeader: opt, - } - req, err := s.client.newRequest(ctx, &sendOpt) - if err != nil { - return nil, err - } - - var authTime *AuthTime - if opt != nil { - if opt, ok := opt.(*objectPresignedURLTestingOptions); ok { - authTime = opt.authTime - } - } - if authTime == nil { - authTime = NewAuthTime(auth.Expire) - } - authorization := newAuthorization(auth, req, *authTime) - sign := encodeURIComponent(authorization) - - if req.URL.RawQuery == "" { - req.URL.RawQuery = fmt.Sprintf("sign=%s", sign) - } else { - req.URL.RawQuery = fmt.Sprintf("%s&sign=%s", req.URL.RawQuery, sign) - } - return req.URL, nil -} - -// Object ... 
-type Object struct { - Key string `xml:",omitempty"` - ETag string `xml:",omitempty"` - Size int `xml:",omitempty"` - PartNumber int `xml:",omitempty"` - LastModified string `xml:",omitempty"` - StorageClass string `xml:",omitempty"` - Owner *Owner `xml:",omitempty"` -} diff --git a/vendor/github.com/mozillazg/go-cos/object_acl.go b/vendor/github.com/mozillazg/go-cos/object_acl.go deleted file mode 100644 index c17b884..0000000 --- a/vendor/github.com/mozillazg/go-cos/object_acl.go +++ /dev/null @@ -1,63 +0,0 @@ -package cos - -import ( - "context" - "net/http" -) - -// ObjectGetACLResult ... -type ObjectGetACLResult ACLXml - -// GetACL Get Object ACL接口实现使用API读取Object的ACL表,只有所有者有权操作。 -// -// https://www.qcloud.com/document/product/436/7744 -func (s *ObjectService) GetACL(ctx context.Context, name string) (*ObjectGetACLResult, *Response, error) { - var res ObjectGetACLResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name) + "?acl", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// ObjectPutACLOptions ... -type ObjectPutACLOptions struct { - Header *ACLHeaderOptions `url:"-" xml:"-"` - Body *ACLXml `url:"-" header:"-"` -} - -// PutACL 使用API写入Object的ACL表,您可以通过Header:"x-cos-acl", "x-cos-grant-read" , -// "x-cos-grant-write" ,"x-cos-grant-full-control"传入ACL信息, -// 也可以通过body以XML格式传入ACL信息,但是只能选择Header和Body其中一种,否则,返回冲突。 -// -// Put Object ACL是一个覆盖操作,传入新的ACL将覆盖原有ACL。只有所有者有权操作。 -// -// "x-cos-acl":枚举值为public-read,private;public-read意味这个Object有公有读私有写的权限, -// private意味这个Object有私有读写的权限。 -// -// "x-cos-grant-read":意味被赋予权限的用户拥有该Object的读权限 -// -// "x-cos-grant-write":意味被赋予权限的用户拥有该Object的写权限 -// -// "x-cos-grant-full-control":意味被赋予权限的用户拥有该Object的读写权限 -// -// https://www.qcloud.com/document/product/436/7748 -func (s *ObjectService) PutACL(ctx context.Context, name string, opt *ObjectPutACLOptions) (*Response, error) { - header := opt.Header - body := opt.Body - if body != nil { - header = nil - } - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name) + "?acl", - method: http.MethodPut, - optHeader: header, - body: body, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/object_part.go b/vendor/github.com/mozillazg/go-cos/object_part.go deleted file mode 100644 index f8d66cc..0000000 --- a/vendor/github.com/mozillazg/go-cos/object_part.go +++ /dev/null @@ -1,177 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "fmt" - "io" - "net/http" -) - -// InitiateMultipartUploadOptions ... -type InitiateMultipartUploadOptions struct { - *ACLHeaderOptions - *ObjectPutHeaderOptions -} - -// InitiateMultipartUploadResult ... -type InitiateMultipartUploadResult struct { - XMLName xml.Name `xml:"InitiateMultipartUploadResult"` - Bucket string - Key string - UploadID string `xml:"UploadId"` -} - -// InitiateMultipartUpload ... 
-// -// Initiate Multipart Upload请求实现初始化分片上传,成功执行此请求以后会返回Upload ID用于后续的Upload Part请求。 -// -// https://www.qcloud.com/document/product/436/7746 -func (s *ObjectService) InitiateMultipartUpload(ctx context.Context, name string, opt *InitiateMultipartUploadOptions) (*InitiateMultipartUploadResult, *Response, error) { - var res InitiateMultipartUploadResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: "/" + encodeURIComponent(name) + "?uploads", - method: http.MethodPost, - optHeader: opt, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// ObjectUploadPartOptions ... -type ObjectUploadPartOptions struct { - Expect string `header:"Expect,omitempty" url:"-"` - XCosContentSHA1 string `header:"x-cos-content-sha1" url:"-"` - ContentLength int `header:"Content-Length,omitempty" url:"-"` -} - -// UploadPart ... -// -// Upload Part请求实现在初始化以后的分块上传,支持的块的数量为1到10000,块的大小为1 MB 到5 GB。 -// 在每次请求Upload Part时候,需要携带partNumber和uploadID,partNumber为块的编号,支持乱序上传。 -// -// 当传入uploadID和partNumber都相同的时候,后传入的块将覆盖之前传入的块。当uploadID不存在时会返回404错误,NoSuchUpload. -// -// 当 r 不是 bytes.Buffer/bytes.Reader/strings.Reader 时,必须指定 opt.ContentLength -// -// https://www.qcloud.com/document/product/436/7750 -func (s *ObjectService) UploadPart(ctx context.Context, name, uploadID string, partNumber int, r io.Reader, opt *ObjectUploadPartOptions) (*Response, error) { - u := fmt.Sprintf("/%s?partNumber=%d&uploadId=%s", encodeURIComponent(name), partNumber, uploadID) - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: u, - method: http.MethodPut, - optHeader: opt, - body: r, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} - -// ObjectListPartsOptions ... -type ObjectListPartsOptions struct { - EncodingType string `url:"Encoding-type,omitempty"` - MaxParts int `url:"max-parts,omitempty"` - PartNumberMarker int `url:"part-number-marker,omitempty"` -} - -// ObjectListPartsResult ... -type ObjectListPartsResult struct { - XMLName xml.Name `xml:"ListPartsResult"` - Bucket string - EncodingType string `xml:"Encoding-type,omitempty"` - Key string - UploadID string `xml:"UploadId"` - Initiator *Initiator `xml:"Initiator,omitempty"` - Owner *Owner `xml:"Owner,omitempty"` - StorageClass string - PartNumberMarker int - NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"` - MaxParts int - IsTruncated bool - Parts []Object `xml:"Part,omitempty"` -} - -// ListParts ... -// -// List Parts用来查询特定分块上传中的已上传的块。 -// -// https://www.qcloud.com/document/product/436/7747 -func (s *ObjectService) ListParts(ctx context.Context, name, uploadID string) (*ObjectListPartsResult, *Response, error) { - u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID) - var res ObjectListPartsResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: u, - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// CompleteMultipartUploadOptions ... -type CompleteMultipartUploadOptions struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts []Object `xml:"Part"` -} - -// CompleteMultipartUploadResult ... -type CompleteMultipartUploadResult struct { - XMLName xml.Name `xml:"CompleteMultipartUploadResult"` - Location string - Bucket string - Key string - ETag string -} - -// CompleteMultipartUpload ... 
-// -// Complete Multipart Upload用来实现完成整个分块上传。当您已经使用Upload Parts上传所有块以后,你可以用该API完成上传。 -// 在使用该API时,您必须在Body中给出每一个块的PartNumber和ETag,用来校验块的准确性。 -// -// 由于分块上传的合并需要数分钟时间,因而当合并分块开始的时候,COS就立即返回200的状态码,在合并的过程中, -// COS会周期性的返回空格信息来保持连接活跃,直到合并完成,COS会在Body中返回合并后块的内容。 -// -// 当上传块小于1 MB的时候,在调用该请求时,会返回400 EntityTooSmall; -// 当上传块编号不连续的时候,在调用该请求时,会返回400 InvalidPart; -// 当请求Body中的块信息没有按序号从小到大排列的时候,在调用该请求时,会返回400 InvalidPartOrder; -// 当UploadId不存在的时候,在调用该请求时,会返回404 NoSuchUpload。 -// -// 建议您及时完成分块上传或者舍弃分块上传,因为已上传但是未终止的块会占用存储空间进而产生存储费用。 -// -// https://www.qcloud.com/document/product/436/7742 -func (s *ObjectService) CompleteMultipartUpload(ctx context.Context, name, uploadID string, opt *CompleteMultipartUploadOptions) (*CompleteMultipartUploadResult, *Response, error) { - u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID) - var res CompleteMultipartUploadResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: u, - method: http.MethodPost, - body: opt, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} - -// AbortMultipartUpload ... -// -// Abort Multipart Upload用来实现舍弃一个分块上传并删除已上传的块。当您调用Abort Multipart Upload时, -// 如果有正在使用这个Upload Parts上传块的请求,则Upload Parts会返回失败。当该UploadID不存在时,会返回404 NoSuchUpload。 -// -// 建议您及时完成分块上传或者舍弃分块上传,因为已上传但是未终止的块会占用存储空间进而产生存储费用。 -// -// https://www.qcloud.com/document/product/436/7740 -func (s *ObjectService) AbortMultipartUpload(ctx context.Context, name, uploadID string) (*Response, error) { - u := fmt.Sprintf("/%s?uploadId=%s", encodeURIComponent(name), uploadID) - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.BucketURL, - uri: u, - method: http.MethodDelete, - } - resp, err := s.client.send(ctx, &sendOpt) - return resp, err -} diff --git a/vendor/github.com/mozillazg/go-cos/service.go b/vendor/github.com/mozillazg/go-cos/service.go deleted file mode 100644 index 53ccfe1..0000000 --- a/vendor/github.com/mozillazg/go-cos/service.go +++ /dev/null @@ -1,37 +0,0 @@ -package cos - -import ( - "context" - "encoding/xml" - "net/http" -) - -// ServiceService ... -// -// Service 相关 API -type ServiceService service - -// ServiceGetResult ... 
-type ServiceGetResult struct { - XMLName xml.Name `xml:"ListAllMyBucketsResult"` - Owner *Owner `xml:"Owner"` - Buckets []Bucket `xml:"Buckets>Bucket,omitempty"` -} - -// Get Service 接口实现获取该用户下所有Bucket列表。 -// -// 该API接口需要使用Authorization签名认证, -// 且只能获取签名中AccessID所属账户的Bucket列表。 -// -// https://www.qcloud.com/document/product/436/8291 -func (s *ServiceService) Get(ctx context.Context) (*ServiceGetResult, *Response, error) { - var res ServiceGetResult - sendOpt := sendOptions{ - baseURL: s.client.BaseURL.ServiceURL, - uri: "/", - method: http.MethodGet, - result: &res, - } - resp, err := s.client.send(ctx, &sendOpt) - return &res, resp, err -} diff --git a/vendor/github.com/qiniu/api.v7/CHANGELOG.md b/vendor/github.com/qiniu/api.v7/CHANGELOG.md deleted file mode 100644 index be03721..0000000 --- a/vendor/github.com/qiniu/api.v7/CHANGELOG.md +++ /dev/null @@ -1,65 +0,0 @@ -# Changelog - -# 7.2.4 (2018-03-01) -* 增加新加坡机房,新机房上线 -* 增加异步fetch的功能 -* 修复构建访问外链时兼容域名带/后缀 -* 默认开启crc32校验功能,表单+分片上传 -* 使用go内置的context功能 -* 修复qiniu rpc并发上传共用token的bug -* 增加七牛云rtc服务端功能 - -# 7.2.3 (2017-09-25) -* 增加Qiniu的鉴权方式 -* 删除prefop域名检测功能 -* 暴露分片上传的接口以支持复杂的自定义业务逻辑 - -## 7.2.2 (2017-09-19) -* 为表单上传和分片上传增加代理支持 -* 优化表单上传的crc32计算方式,减少内存消耗 -* 增加网页图片的Base64上传方式 - -## 7.2.1 (2017-08-20) -* 设置FormUpload默认支持crc32校验 -* ResumeUpload从API层面即支持crc32校验 - -## 7.2.0 (2017-07-28) -* 重构了v7 SDK的所有代码 - -## 7.1.0 (2016-6-22) - -### 增加 -* 增加多机房相关功能 - -## 7.0.5 (2015-11-20) - -### 增加 -* add delimiter support to Bucket.List -* 增加回调校验 - -## 7.0.4 (2015-09-03) - -### 增加 -* 上传返回参数PutRet增加PersistentId,用于获取上传对应的fop操作的id - -### 修复 -* token 覆盖问题 - -## 7.0.3 (2015-07-11) - -### 增加 -* support NestedObject - -## 7.0.2 (2015-07-7-10) - -### 增加 -* 增加跨空间移动文件(Bucket.MoveEx) - -## 7.0.1 (2015-07-7-10) - -### 增加 -* 完善 PutPolicy:支持 MimeLimit、CallbackHost、CallbackFetchKey、 CallbackBodyType、 Checksum - -## 7.0.0 (2016-06-29) - -* 重构,初始版本 diff --git a/vendor/github.com/qiniu/api.v7/Makefile b/vendor/github.com/qiniu/api.v7/Makefile deleted file mode 100644 index 4b3f0c0..0000000 --- a/vendor/github.com/qiniu/api.v7/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -test: - go test -v ./auth/... - go test -v ./conf/... - go test -v ./cdn/... - go test -v ./storage/... - go test -v ./rtc/... 
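The hunks above drop the vendored copy of github.com/mozillazg/go-cos; after the switch to Go modules the dependency is still resolved through go.mod rather than vendor/. As a reference for how the removed client, auth and object APIs fit together, here is a minimal usage sketch assembled from the deleted README.md, doc.go and object.go — the bucket URL, object name and object content are placeholders, not values taken from this repository, and the AuthorizationTransport shown is the one whose signing flow (calSignKey → calStringToSign → calSignature → genAuthorization) appears in the deleted auth.go.

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"

	"github.com/mozillazg/go-cos"
)

func main() {
	// Placeholder bucket URL; the real format is
	// https://<bucket>-<appid>.cos.<region>.myqcloud.com
	b, err := cos.NewBaseURL("https://examplebucket-1250000000.cos.ap-beijing.myqcloud.com")
	if err != nil {
		panic(err)
	}

	// AuthorizationTransport adds the Authorization header to every
	// request, using the signature scheme implemented in auth.go.
	c := cos.NewClient(b, &http.Client{
		Transport: &cos.AuthorizationTransport{
			SecretID:  os.Getenv("COS_SECRETID"),
			SecretKey: os.Getenv("COS_SECRETKEY"),
		},
	})

	ctx := context.Background()
	name := "test/hello.txt" // placeholder object key

	// Upload: r is a strings.Reader, so ContentLength does not need to
	// be set explicitly (see the Put doc comment above).
	if _, err := c.Object.Put(ctx, name, strings.NewReader("hello cos"), nil); err != nil {
		panic(err)
	}

	// Download and print the object body; Get leaves the body open
	// (disableCloseBody), so the caller must close it.
	resp, err := c.Object.Get(ctx, name, nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s\n", data)
}
```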
diff --git a/vendor/github.com/qiniu/api.v7/README.md b/vendor/github.com/qiniu/api.v7/README.md deleted file mode 100644 index a549ad0..0000000 --- a/vendor/github.com/qiniu/api.v7/README.md +++ /dev/null @@ -1,20 +0,0 @@ -github.com/qiniu/api.v7 (Qiniu Go SDK v7.x) -=============== - -[![Build Status](https://travis-ci.org/qiniu/api.v7.svg?branch=master)](https://travis-ci.org/qiniu/api.v7) [![GoDoc](https://godoc.org/github.com/qiniu/api.v7?status.svg)](https://godoc.org/github.com/qiniu/api.v7) - -[![Qiniu Logo](http://open.qiniudn.com/logo.png)](http://qiniu.com/) - -# 下载 - -``` -go get -u github.com/qiniu/api.v7 -``` - -# 文档 - -[七牛SDK文档站](https://developer.qiniu.com/kodo/sdk/1238/go) 或者 [项目WIKI](https://github.com/qiniu/api.v7/wiki) - -# 示例 - -[参考代码](https://github.com/qiniu/api.v7/tree/master/examples) diff --git a/vendor/github.com/qiniu/api.v7/auth/qbox/doc.go b/vendor/github.com/qiniu/api.v7/auth/qbox/doc.go deleted file mode 100644 index aa1de15..0000000 --- a/vendor/github.com/qiniu/api.v7/auth/qbox/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// qbox 包提供了该SDK需要的相关鉴权方法 -package qbox diff --git a/vendor/github.com/qiniu/api.v7/auth/qbox/qbox_auth.go b/vendor/github.com/qiniu/api.v7/auth/qbox/qbox_auth.go deleted file mode 100644 index 040f812..0000000 --- a/vendor/github.com/qiniu/api.v7/auth/qbox/qbox_auth.go +++ /dev/null @@ -1,147 +0,0 @@ -package qbox - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "fmt" - "io" - "net/http" - - "github.com/qiniu/api.v7/conf" - "github.com/qiniu/x/bytes.v7/seekable" -) - -// Mac 七牛AK/SK的对象,AK/SK可以从 https://portal.qiniu.com/user/key 获取。 -type Mac struct { - AccessKey string - SecretKey []byte -} - -// NewMac 构建一个新的拥有AK/SK的对象 -func NewMac(accessKey, secretKey string) (mac *Mac) { - return &Mac{accessKey, []byte(secretKey)} -} - -// Sign 对数据进行签名,一般用于私有空间下载用途 -func (mac *Mac) Sign(data []byte) (token string) { - h := hmac.New(sha1.New, mac.SecretKey) - h.Write(data) - - sign := base64.URLEncoding.EncodeToString(h.Sum(nil)) - return fmt.Sprintf("%s:%s", mac.AccessKey, sign) -} - -// SignWithData 对数据进行签名,一般用于上传凭证的生成用途 -func (mac *Mac) SignWithData(b []byte) (token string) { - encodedData := base64.URLEncoding.EncodeToString(b) - h := hmac.New(sha1.New, mac.SecretKey) - h.Write([]byte(encodedData)) - digest := h.Sum(nil) - sign := base64.URLEncoding.EncodeToString(digest) - return fmt.Sprintf("%s:%s:%s", mac.AccessKey, sign, encodedData) -} - -// SignRequest 对数据进行签名,一般用于管理凭证的生成 -func (mac *Mac) SignRequest(req *http.Request) (token string, err error) { - h := hmac.New(sha1.New, mac.SecretKey) - - u := req.URL - data := u.Path - if u.RawQuery != "" { - data += "?" 
+ u.RawQuery - } - io.WriteString(h, data+"\n") - - if incBody(req) { - s2, err2 := seekable.New(req) - if err2 != nil { - return "", err2 - } - h.Write(s2.Bytes()) - } - - sign := base64.URLEncoding.EncodeToString(h.Sum(nil)) - token = fmt.Sprintf("%s:%s", mac.AccessKey, sign) - return -} - -// SignRequestV2 对数据进行签名,一般用于高级管理凭证的生成 -func (mac *Mac) SignRequestV2(req *http.Request) (token string, err error) { - h := hmac.New(sha1.New, mac.SecretKey) - - u := req.URL - - //write method path?query - io.WriteString(h, fmt.Sprintf("%s %s", req.Method, u.Path)) - if u.RawQuery != "" { - io.WriteString(h, "?") - io.WriteString(h, u.RawQuery) - } - - //write host and post - io.WriteString(h, "\nHost: ") - io.WriteString(h, req.Host) - - //write content type - contentType := req.Header.Get("Content-Type") - if contentType != "" { - io.WriteString(h, "\n") - io.WriteString(h, fmt.Sprintf("Content-Type: %s", contentType)) - } - - io.WriteString(h, "\n\n") - - //write body - if incBodyV2(req) { - s2, err2 := seekable.New(req) - if err2 != nil { - return "", err2 - } - h.Write(s2.Bytes()) - } - - sign := base64.URLEncoding.EncodeToString(h.Sum(nil)) - token = fmt.Sprintf("%s:%s", mac.AccessKey, sign) - return -} - -// 管理凭证生成时,是否同时对request body进行签名 -func incBody(req *http.Request) bool { - return req.Body != nil && req.Header.Get("Content-Type") == conf.CONTENT_TYPE_FORM -} - -func incBodyV2(req *http.Request) bool { - contentType := req.Header.Get("Content-Type") - return req.Body != nil && (contentType == conf.CONTENT_TYPE_FORM || contentType == conf.CONTENT_TYPE_JSON) -} - -// VerifyCallback 验证上传回调请求是否来自七牛 -func (mac *Mac) VerifyCallback(req *http.Request) (bool, error) { - auth := req.Header.Get("Authorization") - if auth == "" { - return false, nil - } - - token, err := mac.SignRequest(req) - if err != nil { - return false, err - } - - return auth == "QBox "+token, nil -} - -// Sign 一般用于下载凭证的签名 -func Sign(mac *Mac, data []byte) string { - return mac.Sign(data) -} - -// SignWithData 一般用于上传凭证的签名 -func SignWithData(mac *Mac, data []byte) string { - return mac.SignWithData(data) -} - -// VerifyCallback 验证上传回调请求是否来自七牛 -func VerifyCallback(mac *Mac, req *http.Request) (bool, error) { - return mac.VerifyCallback(req) -} diff --git a/vendor/github.com/qiniu/api.v7/cdn/anti_leech.go b/vendor/github.com/qiniu/api.v7/cdn/anti_leech.go deleted file mode 100644 index 37aa7e0..0000000 --- a/vendor/github.com/qiniu/api.v7/cdn/anti_leech.go +++ /dev/null @@ -1,33 +0,0 @@ -package cdn - -import ( - "crypto/md5" - "fmt" - "net/url" - "time" -) - -// CreateTimestampAntileechURL 用来构建七牛CDN时间戳防盗链的访问链接 -func CreateTimestampAntileechURL(urlStr string, encryptKey string, - durationInSeconds int64) (antileechURL string, err error) { - u, err := url.Parse(urlStr) - if err != nil { - return - } - - expireTime := time.Now().Add(time.Second * time.Duration(durationInSeconds)).Unix() - toSignStr := fmt.Sprintf("%s%s%x", encryptKey, u.EscapedPath(), expireTime) - signedStr := fmt.Sprintf("%x", md5.Sum([]byte(toSignStr))) - - q := url.Values{} - q.Add("sign", signedStr) - q.Add("t", fmt.Sprintf("%x", expireTime)) - - if u.RawQuery == "" { - antileechURL = u.String() + "?" 
+ q.Encode() - } else { - antileechURL = u.String() + "&" + q.Encode() - } - - return -} diff --git a/vendor/github.com/qiniu/api.v7/cdn/api.go b/vendor/github.com/qiniu/api.v7/cdn/api.go deleted file mode 100644 index 31de5a1..0000000 --- a/vendor/github.com/qiniu/api.v7/cdn/api.go +++ /dev/null @@ -1,301 +0,0 @@ -package cdn - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/qiniu/api.v7/auth/qbox" -) - -// Fusion CDN服务域名 -var ( - FusionHost = "http://fusion.qiniuapi.com" -) - -// CdnManager 提供了文件和目录刷新,文件预取,获取域名带宽和流量数据,获取域名日志列表等功能 -type CdnManager struct { - mac *qbox.Mac -} - -// NewCdnManager 用来构建一个新的 CdnManager -func NewCdnManager(mac *qbox.Mac) *CdnManager { - return &CdnManager{mac: mac} -} - -// TrafficReq 为批量查询带宽/流量的API请求内容 -// StartDate 开始日期,格式例如:2016-07-01 -// EndDate 结束日期,格式例如:2016-07-03 -// Granularity 取值粒度,取值可选值:5min/hour/day -// Domains 域名列表,彼此用 ; 连接 -type TrafficReq struct { - StartDate string `json:"startDate"` - EndDate string `json:"endDate"` - Granularity string `json:"granularity"` - Domains string `json:"domains"` -} - -// TrafficResp 为带宽/流量查询响应内容 -type TrafficResp struct { - Code int `json:"code"` - Error string `json:"error"` - Time []string `json:"time,omitempty"` - Data map[string]TrafficData `json:"data,omitempty"` -} - -// TrafficData 为带宽/流量数据 -type TrafficData struct { - DomainChina []int `json:"china"` - DomainOversea []int `json:"oversea"` -} - -// GetBandwidthData 方法用来获取域名访问带宽数据 -// StartDate string 必须 开始日期,例如:2016-07-01 -// EndDate string 必须 结束日期,例如:2016-07-03 -// Granularity string 必须 粒度,取值:5min / hour /day -// Domains []string 必须 域名列表 -func (m *CdnManager) GetBandwidthData(startDate, endDate, granularity string, - domainList []string) (bandwidthData TrafficResp, err error) { - domains := strings.Join(domainList, ";") - reqBody := TrafficReq{ - StartDate: startDate, - EndDate: endDate, - Granularity: granularity, - Domains: domains, - } - - resData, reqErr := postRequest(m.mac, "/v2/tune/bandwidth", reqBody) - if reqErr != nil { - err = reqErr - return - } - umErr := json.Unmarshal(resData, &bandwidthData) - if umErr != nil { - err = umErr - return - } - return -} - -// GetFluxData 方法用来获取域名访问流量数据 -// StartDate string 必须 开始日期,例如:2016-07-01 -// EndDate string 必须 结束日期,例如:2016-07-03 -// Granularity string 必须 粒度,取值:5min / hour /day -// Domains []string 必须 域名列表 -func (m *CdnManager) GetFluxData(startDate, endDate, granularity string, - domainList []string) (fluxData TrafficResp, err error) { - domains := strings.Join(domainList, ";") - reqBody := TrafficReq{ - StartDate: startDate, - EndDate: endDate, - Granularity: granularity, - Domains: domains, - } - - resData, reqErr := postRequest(m.mac, "/v2/tune/flux", reqBody) - if reqErr != nil { - err = reqErr - return - } - - umErr := json.Unmarshal(resData, &fluxData) - if umErr != nil { - err = umErr - return - } - - return -} - -// RefreshReq 为缓存刷新请求内容 -type RefreshReq struct { - Urls []string `json:"urls"` - Dirs []string `json:"dirs"` -} - -// RefreshResp 缓存刷新响应内容 -type RefreshResp struct { - Code int `json:"code"` - Error string `json:"error"` - RequestID string `json:"requestId,omitempty"` - InvalidUrls []string `json:"invalidUrls,omitempty"` - InvalidDirs []string `json:"invalidDirs,omitempty"` - URLQuotaDay int `json:"urlQuotaDay,omitempty"` - URLSurplusDay int `json:"urlSurplusDay,omitempty"` - DirQuotaDay int `json:"dirQuotaDay,omitempty"` - DirSurplusDay int `json:"dirSurplusDay,omitempty"` -} - -// RefreshUrlsAndDirs 方法用来刷新文件或目录 -// urls 
要刷新的单个url列表,单次方法调用总数不超过100条;单个url,即一个具体的url, -// 例如:http://bar.foo.com/index.html -// dirs 要刷新的目录url列表,单次方法调用总数不超过10条;目录dir,即表示一个目录级的url, -// 例如:http://bar.foo.com/dir/, -func (m *CdnManager) RefreshUrlsAndDirs(urls, dirs []string) (result RefreshResp, err error) { - if len(urls) > 100 { - err = errors.New("urls count exceeds the limit of 100") - return - } - if len(dirs) > 10 { - err = errors.New("dirs count exceeds the limit of 10") - return - } - - reqBody := RefreshReq{ - Urls: urls, - Dirs: dirs, - } - - resData, reqErr := postRequest(m.mac, "/v2/tune/refresh", reqBody) - if reqErr != nil { - err = reqErr - return - } - umErr := json.Unmarshal(resData, &result) - if umErr != nil { - err = reqErr - return - } - - return -} - -// RefreshUrls 刷新文件 -func (m *CdnManager) RefreshUrls(urls []string) (result RefreshResp, err error) { - return m.RefreshUrlsAndDirs(urls, nil) -} - -// RefreshDirs 刷新目录 -func (m *CdnManager) RefreshDirs(dirs []string) (result RefreshResp, err error) { - return m.RefreshUrlsAndDirs(nil, dirs) -} - -// PrefetchReq 文件预取请求内容 -type PrefetchReq struct { - Urls []string `json:"urls"` -} - -// PrefetchResp 文件预取响应内容 -type PrefetchResp struct { - Code int `json:"code"` - Error string `json:"error"` - RequestID string `json:"requestId,omitempty"` - InvalidUrls []string `json:"invalidUrls,omitempty"` - QuotaDay int `json:"quotaDay,omitempty"` - SurplusDay int `json:"surplusDay,omitempty"` -} - -// PrefetchUrls 预取文件链接,每次最多不可以超过100条 -func (m *CdnManager) PrefetchUrls(urls []string) (result PrefetchResp, err error) { - if len(urls) > 100 { - err = errors.New("urls count exceeds the limit of 100") - return - } - - reqBody := PrefetchReq{ - Urls: urls, - } - - resData, reqErr := postRequest(m.mac, "/v2/tune/prefetch", reqBody) - if reqErr != nil { - err = reqErr - return - } - - umErr := json.Unmarshal(resData, &result) - if umErr != nil { - err = umErr - return - } - - return -} - -// ListLogRequest 日志下载请求内容 -type ListLogRequest struct { - Day string `json:"day"` - Domains string `json:"domains"` -} - -// ListLogResult 日志下载相应内容 -type ListLogResult struct { - Code int `json:"code"` - Error string `json:"error"` - Data map[string][]LogDomainInfo `json:"data"` -} - -// LogDomainInfo 日志下载信息 -type LogDomainInfo struct { - Name string `json:"name"` - Size int64 `json:"size"` - ModifiedTime int64 `json:"mtime"` - URL string `json:"url"` -} - -// GetCdnLogList 获取CDN域名访问日志的下载链接 -func (m *CdnManager) GetCdnLogList(day string, domains []string) ( - listLogResult ListLogResult, err error) { - //new log query request - logReq := ListLogRequest{ - Day: day, - Domains: strings.Join(domains, ";"), - } - - resData, reqErr := postRequest(m.mac, "/v2/tune/log/list", logReq) - if reqErr != nil { - err = fmt.Errorf("get response error, %s", reqErr) - return - } - - if decodeErr := json.Unmarshal(resData, &listLogResult); decodeErr != nil { - err = fmt.Errorf("get response error, %s", decodeErr) - return - } - - if listLogResult.Error != "" { - err = fmt.Errorf("get log list error, %d %s", listLogResult.Code, listLogResult.Error) - return - } - - return -} - -// RequestWithBody 带body对api发出请求并且返回response body -func postRequest(mac *qbox.Mac, path string, body interface{}) (resData []byte, - err error) { - urlStr := fmt.Sprintf("%s%s", FusionHost, path) - reqData, _ := json.Marshal(body) - req, reqErr := http.NewRequest("POST", urlStr, bytes.NewReader(reqData)) - if reqErr != nil { - err = reqErr - return - } - - accessToken, signErr := mac.SignRequest(req) - if signErr != nil { - err = signErr - 
return - } - - req.Header.Add("Authorization", "QBox "+accessToken) - req.Header.Add("Content-Type", "application/json") - - resp, respErr := http.DefaultClient.Do(req) - if respErr != nil { - err = respErr - return - } - defer resp.Body.Close() - - resData, ioErr := ioutil.ReadAll(resp.Body) - if ioErr != nil { - err = ioErr - return - } - - return -} diff --git a/vendor/github.com/qiniu/api.v7/cdn/doc.go b/vendor/github.com/qiniu/api.v7/cdn/doc.go deleted file mode 100644 index 75c59cf..0000000 --- a/vendor/github.com/qiniu/api.v7/cdn/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// cdn 包提供了 Fusion CDN的常见功能。相关功能的文档参考:https://developer.qiniu.com/fusion。 -// 目前提供了文件和目录刷新,文件预取,获取域名带宽和流量数据,获取域名日志列表等功能。 -package cdn diff --git a/vendor/github.com/qiniu/api.v7/conf/conf.go b/vendor/github.com/qiniu/api.v7/conf/conf.go deleted file mode 100644 index 5894514..0000000 --- a/vendor/github.com/qiniu/api.v7/conf/conf.go +++ /dev/null @@ -1,9 +0,0 @@ -package conf - -const Version = "7.3.0" - -const ( - CONTENT_TYPE_JSON = "application/json" - CONTENT_TYPE_FORM = "application/x-www-form-urlencoded" - CONTENT_TYPE_OCTET = "application/octet-stream" -) diff --git a/vendor/github.com/qiniu/api.v7/conf/doc.go b/vendor/github.com/qiniu/api.v7/conf/doc.go deleted file mode 100644 index 0c5bd7b..0000000 --- a/vendor/github.com/qiniu/api.v7/conf/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// conf 包提供了设置APP名称的方法。该APP名称会被放入API请求的UserAgent中,方便后续查询日志分析问题。 -package conf diff --git a/vendor/github.com/qiniu/api.v7/doc.go b/vendor/github.com/qiniu/api.v7/doc.go deleted file mode 100644 index 2af2de3..0000000 --- a/vendor/github.com/qiniu/api.v7/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - -包 github.com/qiniu/api.v7 是七牛 Go 语言 SDK v7.x 版本。 - -主要提供了存储的数据上传,下载,管理以及CDN相关的功能。要求Go语言版本>=1.7.0。 - -Go SDK 中主要包含几个包: - -auth 包提供鉴权相关方法,conf 包提供配置相关方法,cdn包提供CDN相关的功能,storage包提供存储相关的功能。 - -*/ -package api - -import ( - _ "github.com/qiniu/api.v7/auth/qbox" - _ "github.com/qiniu/api.v7/cdn" - _ "github.com/qiniu/api.v7/conf" - _ "github.com/qiniu/api.v7/rtc" - _ "github.com/qiniu/api.v7/storage" -) diff --git a/vendor/github.com/qiniu/api.v7/rtc/api.go b/vendor/github.com/qiniu/api.v7/rtc/api.go deleted file mode 100644 index 296cb73..0000000 --- a/vendor/github.com/qiniu/api.v7/rtc/api.go +++ /dev/null @@ -1,246 +0,0 @@ -package rtc - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/qiniu/api.v7/auth/qbox" -) - -var ( - // RtcHost 为 Qiniu RTC Server API服务域名 - RtcHost = "rtc.qiniuapi.com" -) - -// Manager 提供了 Qiniu RTC Server API 相关功能 -type Manager struct { - mac *qbox.Mac - httpClient *http.Client -} - -// MergePublishRtmp 连麦合流转推 RTMP 的配置 -// Enable: 布尔类型,用于开启和关闭所有房间的合流功能。 -// AudioOnly: 布尔类型,可选,指定是否只合成音频。 -// Height, Width: int,可选,指定合流输出的高和宽,默认为 640 x 480。 -// OutputFps: int,可选,指定合流输出的帧率,默认为 25 fps 。 -// OutputKbps: int,可选,指定合流输出的码率,默认为 1000 。 -// URL: 合流后转推旁路直播的地址,可选,支持魔法变量配置按照连麦房间号生成不同的推流地址。如果是转推到七牛直播云,不建议使用该配置 -// StreamTitle: 转推七牛直播云的流名,可选,支持魔法变量配置按照连麦房间号生成不同的流名。例如,配置 Hub 为 qn-zhibo ,配置 StreamTitle 为 $(roomName) ,则房间 meeting-001 的合流将会被转推到 rtmp://pili-publish.qn-zhibo.***.com/qn-zhibo/meeting-001地址。详细配置细则,请咨询七牛技术支持。 -type MergePublishRtmp struct { - Enable bool `json:"enable,omitempty"` - AudioOnly bool `json:"audioOnly,omitempty"` - Height int `json:"height,omitempty"` - Width int `json:"width,omitempty"` - OutputFps int `json:"fps,omitempty"` - OutputKbps int `json:"kbps,omitempty"` - URL string `json:"url,omitempty"` - StreamTitle string 
`json:"streamTitle,omitempty"` -} - -// App 完整信息 -// AppID: app 的唯一标识,创建的时候由系统生成。 -type App struct { - AppID string `json:"appId"` - AppInitConf - MergePublishRtmp MergePublishRtmp `json:"mergePublishRtmp,omitempty"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// AppInitConf 创建 App 请求参数 -// Title: app 的名称, 可选。 -// Hub: 绑定的直播 hub,可选,用于合流后 rtmp 推流。 -// MaxUsers: int 类型,可选,连麦房间支持的最大在线人数。 -// NoAutoKickUser: bool 类型,可选,禁止自动踢人。 -type AppInitConf struct { - Hub string `json:"hub,omitempty"` - Title string `json:"title,omitempty"` - MaxUsers int `json:"maxUsers,omitempty"` - NoAutoKickUser bool `json:"noAutoKickUser,omitempty"` -} - -// MergePublishRtmpInfo 连麦合流转推 RTMP 的配置更改信息 -type MergePublishRtmpInfo struct { - Enable *bool `json:"enable,omitempty"` - AudioOnly *bool `json:"audioOnly,omitempty"` - Height *int `json:"height,omitempty"` - Width *int `json:"width,omitempty"` - OutputFps *int `json:"fps,omitempty"` - OutputKbps *int `json:"kbps,omitempty"` - URL *string `json:"url,omitempty"` - StreamTitle *string `json:"streamTitle,omitempty"` -} - -// AppUpdateInfo 更改信息 -// MergePublishRtmpInfo 连麦合流转推 RTMP 的配置更改信息 -type AppUpdateInfo struct { - Hub *string `json:"hub,omitempty"` - Title *string `json:"title,omitempty"` - MaxUsers *int `json:"maxUsers,omitempty"` - NoAutoKickUser *bool `json:"noAutoKickUser,omitempty"` - MergePublishRtmp *MergePublishRtmpInfo `json:"mergePublishRtmp,omitempty"` -} - -// User 连麦房间里的用户 -type User struct { - UserID string `json:"userId"` -} - -// NewManager 用来构建一个新的 Manager -func NewManager(mac *qbox.Mac) *Manager { - httpClient := http.DefaultClient - return &Manager{mac: mac, httpClient: httpClient} -} - -// CreateApp 新建实时音视频云 -func (r *Manager) CreateApp(appReq AppInitConf) (App, error) { - url := buildURL("/v3/apps") - ret := App{} - info := postReq(r.httpClient, r.mac, url, &appReq, &ret) - return ret, info.Err -} - -// GetApp 根据 appID 获取 实时音视频云 信息 -func (r *Manager) GetApp(appID string) (App, error) { - url := buildURL("/v3/apps/" + appID) - ret := App{} - info := getReq(r.httpClient, r.mac, url, &ret) - return ret, info.Err -} - -// DeleteApp 根据 appID 删除 实时音视频云 -func (r *Manager) DeleteApp(appID string) error { - url := buildURL("/v3/apps/" + appID) - info := delReq(r.httpClient, r.mac, url, nil) - return info.Err -} - -// UpdateApp 根据 appID, App 更改实时音视频云 信息 -func (r *Manager) UpdateApp(appID string, appInfo AppUpdateInfo) (App, error) { - url := buildURL("/v3/apps/" + appID) - ret := App{} - info := postReq(r.httpClient, r.mac, url, &appInfo, &ret) - return ret, info.Err -} - -// ListUser 根据 appID, roomName 获取连麦房间里在线的用户 -// appID: 连麦房间所属的 app 。 -// roomName: 操作所查询的连麦房间。 -func (r *Manager) ListUser(appID, roomName string) ([]User, error) { - url := buildURL("/v3/apps/" + appID + "/rooms/" + roomName + "/users") - users := struct { - Users []User `json:"users"` - }{} - info := getReq(r.httpClient, r.mac, url, &users) - return users.Users, info.Err -} - -// KickUser 根据 appID, roomName, UserID 剔除在线的用户 -// appID: 连麦房间所属的 app 。 -// roomName: 连麦房间。 -// userID: 操作所剔除的用户。 -func (r *Manager) KickUser(appID, roomName, userID string) error { - url := buildURL("/v3/apps/" + appID + "/rooms/" + roomName + "/users/" + userID) - info := delReq(r.httpClient, r.mac, url, nil) - return info.Err -} - -// RoomQuery 房间查询响应结果 -// IsEnd: bool 类型,分页查询是否已经查完所有房间。 -// Offset: int 类型,下次分页查询使用的位移标记。 -// Rooms: 当前活跃的房间名列表。 -type RoomQuery struct { - IsEnd bool `json:"end"` - Offset int `json:"offset"` - Rooms []RoomName `json:"rooms"` -} - -// 
RoomName 房间名 -type RoomName string - -// ListActiveRooms 根据 appID, roomNamePrefix, offset, limit 查询当前活跃的房间 -// appID: 连麦房间所属的 app 。 -// roomNamePrefix: 所查询房间名的前缀索引,可以为空。 -// offset: int 类型,分页查询的位移标记。 -// limit: int 类型,此次查询的最大长度。 -func (r *Manager) ListActiveRooms(appID, roomNamePrefix string, offset, limit int) (RoomQuery, error) { - ret, _, err := r.doListActiveRoom(appID, roomNamePrefix, offset, limit) - return ret, err -} - -// ListAllActiveRooms 根据 appID, roomNamePrefix 查询当前活跃的房间 -// appID: 连麦房间所属的 app 。 -// roomNamePrefix: 所查询房间名的前缀索引,可以为空。 -func (r *Manager) ListAllActiveRooms(appID, roomNamePrefix string) ([]RoomName, error) { - ns := []RoomName{} - var outErr error - for offset := 0; ; { - q, info, err := r.doListActiveRoom(appID, roomNamePrefix, offset, 100) - if err != nil && info.Code != 401 { - time.Sleep(500 * time.Millisecond) - q, info, err = r.doListActiveRoom(appID, roomNamePrefix, offset, 100) - } - - if err != nil || len(q.Rooms) == 0 { - outErr = err - break - } - offset = q.Offset - ns = append(ns, q.Rooms...) - if q.IsEnd { - break - } - } - return ns, outErr -} - -func (r *Manager) doListActiveRoom(appID, roomNamePrefix string, offset, limit int) (RoomQuery, resInfo, error) { - query := "" - roomNamePrefix = strings.TrimSpace(roomNamePrefix) - if len(roomNamePrefix) != 0 { - query = "prefix=" + roomNamePrefix + "&" - } - query += fmt.Sprintf("offset=%v&limit=%v", offset, limit) - url := buildURL("/v3/apps/" + appID + "/rooms?" + query) - ret := RoomQuery{} - info := getReq(r.httpClient, r.mac, url, &ret) - return ret, *info, info.Err -} - -// RoomAccess 房间管理凭证 -// AppID: 房间所属帐号的 app 。 -// RoomName: 房间名称,需满足规格 ^[a-zA-Z0-9_-]{3,64}$ -// UserID: 请求加入房间的用户 ID,需满足规格 ^[a-zA-Z0-9_-]{3,50}$ -// ExpireAt: int64 类型,鉴权的有效时间,传入以秒为单位的64位Unix绝对时间,token 将在该时间后失效。 -// Permission: 该用户的房间管理权限,"admin" 或 "user",默认为 "user" 。当权限角色为 "admin" 时,拥有将其他用户移除出房间等特权. 
-type RoomAccess struct { - AppID string `json:"appId"` - RoomName string `json:"roomName"` - UserID string `json:"userId"` - ExpireAt int64 `json:"expireAt"` - Permission string `json:"permission"` -} - -// GetRoomToken 生成房间管理鉴权,连麦用户终端通过房间管理鉴权获取七牛连麦服务。 -func (r *Manager) GetRoomToken(roomAccess RoomAccess) (token string, err error) { - roomAccessByte, err := json.Marshal(roomAccess) - if err != nil { - return - } - buf := make([]byte, base64.URLEncoding.EncodedLen(len(roomAccessByte))) - base64.URLEncoding.Encode(buf, roomAccessByte) - - hmacsha1 := hmac.New(sha1.New, r.mac.SecretKey) - hmacsha1.Write(buf) - sign := hmacsha1.Sum(nil) - - encodedSign := base64.URLEncoding.EncodeToString(sign) - token = r.mac.AccessKey + ":" + encodedSign + ":" + string(buf) - return -} diff --git a/vendor/github.com/qiniu/api.v7/rtc/doc.go b/vendor/github.com/qiniu/api.v7/rtc/doc.go deleted file mode 100644 index 6a8ab9b..0000000 --- a/vendor/github.com/qiniu/api.v7/rtc/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Qiniu RTC Server API 为七牛实时音视频云提供权限验证和房间管理功能,API 均采用 REST 接口。 -// 提供 app 操作接口,包含 CreateApp、GetApp、DeleteApp、UpdateApp ; -// 提供 room 操作接口,包含 ListUser、KickUser、ListActiveRoom 以及 -// RoomToken 的计算 - -package rtc diff --git a/vendor/github.com/qiniu/api.v7/rtc/util.go b/vendor/github.com/qiniu/api.v7/rtc/util.go deleted file mode 100644 index 86933fb..0000000 --- a/vendor/github.com/qiniu/api.v7/rtc/util.go +++ /dev/null @@ -1,140 +0,0 @@ -package rtc - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/qiniu/api.v7/auth/qbox" -) - -// resInfo is httpresponse infomation -type resInfo struct { - Code int - Err error -} - -func newResInfo() resInfo { - info := resInfo{} - return info -} - -func getReqid(src *http.Header) string { - for k, v := range *src { - K := strings.Title(k) - if strings.Contains(K, "Reqid") { - return strings.Join(v, ", ") - } - } - return "" -} - -func buildURL(path string) string { - if strings.Index(path, "/") != 0 { - path = "/" + path - } - return "https://" + RtcHost + path -} - -func postReq(httpClient *http.Client, mac *qbox.Mac, url string, - reqParam interface{}, ret interface{}) *resInfo { - info := newResInfo() - var reqData []byte - var err error - - switch v := reqParam.(type) { - case *string: - reqData = []byte(*v) - case string: - reqData = []byte(v) - case *[]byte: - reqData = *v - case []byte: - reqData = v - default: - reqData, err = json.Marshal(reqParam) - } - - if err != nil { - info.Err = err - return &info - } - req, err := http.NewRequest("POST", url, bytes.NewReader(reqData)) - if err != nil { - info.Err = err - return &info - } - req.Header.Add("Content-Type", "application/json") - return callReq(httpClient, req, mac, &info, ret) -} - -func getReq(httpClient *http.Client, mac *qbox.Mac, url string, ret interface{}) *resInfo { - info := newResInfo() - req, err := http.NewRequest("GET", url, nil) - if err != nil { - info.Err = err - return &info - } - return callReq(httpClient, req, mac, &info, ret) -} - -func delReq(httpClient *http.Client, mac *qbox.Mac, url string, ret interface{}) *resInfo { - info := newResInfo() - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - info.Err = err - return &info - } - return callReq(httpClient, req, mac, &info, ret) -} - -func callReq(httpClient *http.Client, req *http.Request, mac *qbox.Mac, - info *resInfo, ret interface{}) (oinfo *resInfo) { - oinfo = info - accessToken, err := mac.SignRequestV2(req) - if err != nil { - info.Err = err - return - } - 
req.Header.Add("Authorization", "Qiniu "+accessToken) - client := httpClient - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req) - if err != nil { - info.Err = err - return - } - defer resp.Body.Close() - info.Code = resp.StatusCode - reqid := getReqid(&resp.Header) - rebuildErr := func(msg string) error { - return fmt.Errorf("Code: %v, Reqid: %v, %v", info.Code, reqid, msg) - } - - if resp.ContentLength > 2*1024*1024 { - err = rebuildErr(fmt.Sprintf("response is too long. Content-Length: %v", resp.ContentLength)) - info.Err = err - return - } - resData, err := ioutil.ReadAll(resp.Body) - if err != nil { - info.Err = rebuildErr(err.Error()) - return - } - if info.Code != 200 { - info.Err = rebuildErr(string(resData)) - return - } - if ret != nil { - err = json.Unmarshal(resData, ret) - if err != nil { - info.Err = rebuildErr(fmt.Sprintf("err: %v, res: %v", err, resData)) - } - } - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/base64_upload.go b/vendor/github.com/qiniu/api.v7/storage/base64_upload.go deleted file mode 100644 index 8729eb4..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/base64_upload.go +++ /dev/null @@ -1,173 +0,0 @@ -package storage - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "hash/crc32" - "io" - "net/http" - "strconv" - "strings" -) - -// Base64Uploader 表示一个Base64上传对象 -type Base64Uploader struct { - client *Client - cfg *Config -} - -// NewBase64Uploader 用来构建一个Base64上传的对象 -func NewBase64Uploader(cfg *Config) *Base64Uploader { - if cfg == nil { - cfg = &Config{} - } - - return &Base64Uploader{ - client: &DefaultClient, - cfg: cfg, - } -} - -// NewBase64UploaderEx 用来构建一个Base64上传的对象 -func NewBase64UploaderEx(cfg *Config, client *Client) *Base64Uploader { - if cfg == nil { - cfg = &Config{} - } - - if client == nil { - client = &DefaultClient - } - - return &Base64Uploader{ - client: client, - cfg: cfg, - } -} - -// Base64PutExtra 为Base64上传的额外可选项 -type Base64PutExtra struct { - // 可选,用户自定义参数,必须以 "x:" 开头。若不以x:开头,则忽略。 - Params map[string]string - - // 可选,当为 "" 时候,服务端自动判断。 - MimeType string -} - -// Put 用来以Base64方式上传一个文件 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 callbackUrl 或 returnBody,那么返回的数据结构是 PutRet 结构。 -// uptoken 是由业务服务器颁发的上传凭证。 -// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 -// base64Data 是要上传的Base64数据,一般为图片数据的Base64编码字符串 -// extra 是上传的一些可选项,可以指定为nil。详细见 Base64PutExtra 结构的描述。 -// -func (p *Base64Uploader) Put( - ctx context.Context, ret interface{}, uptoken, key string, base64Data []byte, extra *Base64PutExtra) (err error) { - return p.put(ctx, ret, uptoken, key, true, base64Data, extra) -} - -// PutWithoutKey 用来以Base64方式上传一个文件,保存的文件名以文件的内容hash作为文件名 -func (p *Base64Uploader) PutWithoutKey( - ctx context.Context, ret interface{}, uptoken string, base64Data []byte, extra *Base64PutExtra) (err error) { - return p.put(ctx, ret, uptoken, "", false, base64Data, extra) -} - -func (p *Base64Uploader) put( - ctx context.Context, ret interface{}, uptoken, key string, hasKey bool, base64Data []byte, extra *Base64PutExtra) (err error) { - //get up host - ak, bucket, gErr := getAkBucketFromUploadToken(uptoken) - if gErr != nil { - err = gErr - return - } - - var upHost string - upHost, err = p.upHost(ak, bucket) - if err != nil { - return - } - - //set default extra - if extra == nil { - extra = &Base64PutExtra{} - } - - //calc crc32 - h := crc32.NewIEEE() - rawReader := base64.NewDecoder(base64.StdEncoding, bytes.NewReader(base64Data)) - fsize, decodeErr := 
io.Copy(h, rawReader) - if decodeErr != nil { - err = fmt.Errorf("invalid base64 data, %s", decodeErr.Error()) - return - } - fCrc32 := h.Sum32() - - postPath := bytes.NewBufferString("/putb64") - //add fsize - postPath.WriteString("/") - postPath.WriteString(strconv.Itoa(int(fsize))) - - //add key - if hasKey { - postPath.WriteString("/key/") - postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(key))) - } - //add mimeType - if extra.MimeType != "" { - postPath.WriteString("/mimeType/") - postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(extra.MimeType))) - } - - //add crc32 - postPath.WriteString("/crc32/") - postPath.WriteString(fmt.Sprintf("%d", fCrc32)) - - //add extra params - if len(extra.Params) > 0 { - for k, v := range extra.Params { - if strings.HasPrefix(k, "x:") && v != "" { - postPath.WriteString("/") - postPath.WriteString(k) - postPath.WriteString("/") - postPath.WriteString(base64.URLEncoding.EncodeToString([]byte(v))) - } - } - } - - postURL := fmt.Sprintf("%s%s", upHost, postPath.String()) - headers := http.Header{} - headers.Add("Content-Type", "application/octet-stream") - headers.Add("Authorization", "UpToken "+uptoken) - - return p.client.CallWith(ctx, ret, "POST", postURL, headers, bytes.NewReader(base64Data), len(base64Data)) -} - -func (p *Base64Uploader) upHost(ak, bucket string) (upHost string, err error) { - var zone *Zone - if p.cfg.Zone != nil { - zone = p.cfg.Zone - } else { - if v, zoneErr := GetZone(ak, bucket); zoneErr != nil { - err = zoneErr - return - } else { - zone = v - } - } - - scheme := "http://" - if p.cfg.UseHTTPS { - scheme = "https://" - } - - host := zone.SrcUpHosts[0] - if p.cfg.UseCdnDomains { - host = zone.CdnUpHosts[0] - } - - upHost = fmt.Sprintf("%s%s", scheme, host) - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/bucket.go b/vendor/github.com/qiniu/api.v7/storage/bucket.go deleted file mode 100644 index 211620e..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/bucket.go +++ /dev/null @@ -1,742 +0,0 @@ -package storage - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "net/url" - "strconv" - "strings" - - "github.com/qiniu/api.v7/auth/qbox" - "github.com/qiniu/api.v7/conf" - "net/http" -) - -// 资源管理相关的默认域名 -const ( - DefaultRsHost = "rs.qiniu.com" - DefaultRsfHost = "rsf.qiniu.com" - DefaultAPIHost = "api.qiniu.com" - DefaultPubHost = "pu.qbox.me:10200" -) - -// FileInfo 文件基本信息 -type FileInfo struct { - Hash string `json:"hash"` - Fsize int64 `json:"fsize"` - PutTime int64 `json:"putTime"` - MimeType string `json:"mimeType"` - Type int `json:"type"` -} - -func (f *FileInfo) String() string { - str := "" - str += fmt.Sprintf("Hash: %s\n", f.Hash) - str += fmt.Sprintf("Fsize: %d\n", f.Fsize) - str += fmt.Sprintf("PutTime: %d\n", f.PutTime) - str += fmt.Sprintf("MimeType: %s\n", f.MimeType) - str += fmt.Sprintf("Type: %d\n", f.Type) - return str -} - -// FetchRet 资源抓取的返回值 -type FetchRet struct { - Hash string `json:"hash"` - Fsize int64 `json:"fsize"` - MimeType string `json:"mimeType"` - Key string `json:"key"` -} - -type listFilesRet2 struct { - Marker string `json:"marker"` - Item ListItem `json:"item"` - Dir string `json:"dir"` -} - -func (r *FetchRet) String() string { - str := "" - str += fmt.Sprintf("Key: %s\n", r.Key) - str += fmt.Sprintf("Hash: %s\n", r.Hash) - str += fmt.Sprintf("Fsize: %d\n", r.Fsize) - str += fmt.Sprintf("MimeType: %s\n", r.MimeType) - return str -} - -// ListItem 为文件列举的返回值 -type ListItem struct { - Key string `json:"key"` - Hash string `json:"hash"` - 
Fsize int64 `json:"fsize"` - PutTime int64 `json:"putTime"` - MimeType string `json:"mimeType"` - Type int `json:"type"` - EndUser string `json:"endUser"` -} - -// 接口可能返回空的记录 -func (l *ListItem) IsEmpty() (empty bool) { - return l.Key == "" && l.Hash == "" && l.Fsize == 0 && l.PutTime == 0 -} - -func (l *ListItem) String() string { - str := "" - str += fmt.Sprintf("Hash: %s\n", l.Hash) - str += fmt.Sprintf("Fsize: %d\n", l.Fsize) - str += fmt.Sprintf("PutTime: %d\n", l.PutTime) - str += fmt.Sprintf("MimeType: %s\n", l.MimeType) - str += fmt.Sprintf("Type: %d\n", l.Type) - str += fmt.Sprintf("EndUser: %s\n", l.EndUser) - return str -} - -// BatchOpRet 为批量执行操作的返回值 -// 批量操作支持 stat,copy,delete,move,chgm,chtype,deleteAfterDays几个操作 -// 其中 stat 为获取文件的基本信息,如果文件存在则返回基本信息,如果文件不存在返回 error 。 -// 其他的操作,如果成功,则返回 code,不成功会同时返回 error 信息,可以根据 error 信息来判断问题所在。 -type BatchOpRet struct { - Code int `json:"code,omitempty"` - Data struct { - Hash string `json:"hash"` - Fsize int64 `json:"fsize"` - PutTime int64 `json:"putTime"` - MimeType string `json:"mimeType"` - Type int `json:"type"` - Error string `json:"error"` - } `json:"data,omitempty"` -} - -// BucketManager 提供了对资源进行管理的操作 -type BucketManager struct { - Client *Client - Mac *qbox.Mac - Cfg *Config -} - -// NewBucketManager 用来构建一个新的资源管理对象 -func NewBucketManager(mac *qbox.Mac, cfg *Config) *BucketManager { - if cfg == nil { - cfg = &Config{} - } - if cfg.CentralRsHost == "" { - cfg.CentralRsHost = DefaultRsHost - } - - return &BucketManager{ - Client: &DefaultClient, - Mac: mac, - Cfg: cfg, - } -} - -// NewBucketManagerEx 用来构建一个新的资源管理对象 -func NewBucketManagerEx(mac *qbox.Mac, cfg *Config, client *Client) *BucketManager { - if cfg == nil { - cfg = &Config{} - } - - if client == nil { - client = &DefaultClient - } - if cfg.CentralRsHost == "" { - cfg.CentralRsHost = DefaultRsHost - } - - return &BucketManager{ - Client: client, - Mac: mac, - Cfg: cfg, - } -} - -// Buckets 用来获取空间列表,如果指定了 shared 参数为 true,那么一同列表被授权访问的空间 -func (m *BucketManager) Buckets(shared bool) (buckets []string, err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - var reqHost string - - reqHost = m.Cfg.RsReqHost() - reqURL := fmt.Sprintf("%s/buckets?shared=%v", reqHost, shared) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &buckets, "POST", reqURL, headers) - return -} - -// Stat 用来获取一个文件的基本信息 -func (m *BucketManager) Stat(bucket, key string) (info FileInfo, err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIStat(bucket, key)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &info, "POST", reqURL, headers) - return -} - -// Delete 用来删除空间中的一个文件 -func (m *BucketManager) Delete(bucket, key string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - reqURL := fmt.Sprintf("%s%s", reqHost, URIDelete(bucket, key)) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// Copy 用来创建已有空间中的文件的一个新的副本 -func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(srcBucket) - if 
reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URICopy(srcBucket, srcKey, destBucket, destKey, force)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// Move 用来将空间中的一个文件移动到新的空间或者重命名 -func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(srcBucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIMove(srcBucket, srcKey, destBucket, destKey, force)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// ChangeMime 用来更新文件的MimeType -func (m *BucketManager) ChangeMime(bucket, key, newMime string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeMime(bucket, key, newMime)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// ChangeType 用来更新文件的存储类型,0表示普通存储,1表示低频存储 -func (m *BucketManager) ChangeType(bucket, key string, fileType int) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeType(bucket, key, fileType)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// DeleteAfterDays 用来更新文件生命周期,如果 days 设置为0,则表示取消文件的定期删除功能,永久存储 -func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIDeleteAfterDays(bucket, key, days)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// Batch 接口提供了资源管理的批量操作,支持 stat,copy,move,delete,chgm,chtype,deleteAfterDays几个接口 -func (m *BucketManager) Batch(operations []string) (batchOpRet []BatchOpRet, err error) { - if len(operations) > 1000 { - err = errors.New("batch operation count exceeds the limit of 1000") - return - } - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - scheme := "http://" - if m.Cfg.UseHTTPS { - scheme = "https://" - } - reqURL := fmt.Sprintf("%s%s/batch", scheme, m.Cfg.CentralRsHost) - params := map[string][]string{ - "op": operations, - } - err = m.Client.CallWithForm(ctx, &batchOpRet, "POST", reqURL, nil, params) - return -} - -// Fetch 根据提供的远程资源链接来抓取一个文件到空间并已指定文件名保存 -func (m *BucketManager) Fetch(resURL, bucket, key string) (fetchRet FetchRet, err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - - reqHost, rErr := m.IoReqHost(bucket) - if rErr != nil { - err = rErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, uriFetch(resURL, bucket, key)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &fetchRet, "POST", reqURL, headers) - return -} - -func (m *BucketManager) RsReqHost(bucket string) (reqHost string, 
err error) { - var reqErr error - - if m.Cfg.RsHost == "" { - reqHost, reqErr = m.RsHost(bucket) - if reqErr != nil { - err = reqErr - return - } - } else { - reqHost = m.Cfg.RsHost - } - if !strings.HasPrefix(reqHost, "http") { - reqHost = "http://" + reqHost - } - return -} - -func (m *BucketManager) ApiReqHost(bucket string) (reqHost string, err error) { - var reqErr error - - if m.Cfg.ApiHost == "" { - reqHost, reqErr = m.ApiHost(bucket) - if reqErr != nil { - err = reqErr - return - } - } else { - reqHost = m.Cfg.ApiHost - } - if !strings.HasPrefix(reqHost, "http") { - reqHost = "http://" + reqHost - } - return -} - -func (m *BucketManager) RsfReqHost(bucket string) (reqHost string, err error) { - var reqErr error - - if m.Cfg.RsfHost == "" { - reqHost, reqErr = m.RsfHost(bucket) - if reqErr != nil { - err = reqErr - return - } - } else { - reqHost = m.Cfg.RsfHost - } - if !strings.HasPrefix(reqHost, "http") { - reqHost = "http://" + reqHost - } - return -} - -func (m *BucketManager) IoReqHost(bucket string) (reqHost string, err error) { - var reqErr error - - if m.Cfg.IoHost == "" { - reqHost, reqErr = m.IovipHost(bucket) - if reqErr != nil { - err = reqErr - return - } - } else { - reqHost = m.Cfg.IoHost - } - if !strings.HasPrefix(reqHost, "http") { - reqHost = "http://" + reqHost - } - return -} - -// FetchWithoutKey 根据提供的远程资源链接来抓取一个文件到空间并以文件的内容hash作为文件名 -func (m *BucketManager) FetchWithoutKey(resURL, bucket string) (fetchRet FetchRet, err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - - reqHost, rErr := m.IoReqHost(bucket) - if rErr != nil { - err = rErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, uriFetchWithoutKey(resURL, bucket)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &fetchRet, "POST", reqURL, headers) - return -} - -// Prefetch 用来同步镜像空间的资源和镜像源资源内容 -func (m *BucketManager) Prefetch(bucket, key string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.IoReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, uriPrefetch(bucket, key)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// SetImage 用来设置空间镜像源 -func (m *BucketManager) SetImage(siteURL, bucket string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriSetImage(siteURL, bucket)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// SetImageWithHost 用来设置空间镜像源,额外添加回源Host头部 -func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, - uriSetImageWithHost(siteURL, bucket, host)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return -} - -// UnsetImage 用来取消空间镜像源设置 -func (m *BucketManager) UnsetImage(bucket string) (err error) { - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqURL := fmt.Sprintf("http://%s%s", DefaultPubHost, uriUnsetImage(bucket)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, nil, "POST", reqURL, headers) - return err -} - -type 
listFilesRet struct { - Marker string `json:"marker"` - Items []ListItem `json:"items"` - CommonPrefixes []string `json:"commonPrefixes"` -} - -// ListFiles 用来获取空间文件列表,可以根据需要指定文件的前缀 prefix,文件的目录 delimiter,循环列举的时候下次 -// 列举的位置 marker,以及每次返回的文件的最大数量limit,其中limit最大为1000。 -func (m *BucketManager) ListFiles(bucket, prefix, delimiter, marker string, - limit int) (entries []ListItem, commonPrefixes []string, nextMarker string, hasNext bool, err error) { - if limit <= 0 || limit > 1000 { - err = errors.New("invalid list limit, only allow [1, 1000]") - return - } - - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsfReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - ret := listFilesRet{} - reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles(bucket, prefix, delimiter, marker, limit)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &ret, "POST", reqURL, headers) - if err != nil { - return - } - - commonPrefixes = ret.CommonPrefixes - nextMarker = ret.Marker - entries = ret.Items - if ret.Marker != "" { - hasNext = true - } - - return -} - -// ListBucket 用来获取空间文件列表,可以根据需要指定文件的前缀 prefix,文件的目录 delimiter,流式返回每条数据。 -func (m *BucketManager) ListBucket(bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) { - - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.RsfReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - // limit 0 ==> 列举所有文件 - reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles2(bucket, prefix, delimiter, marker)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - retCh, err = m.Client.CallChan(ctx, "POST", reqURL, headers) - return -} - -// ListBucketCancel 用来获取空间文件列表,可以根据需要指定文件的前缀 prefix,文件的目录 delimiter,流式返回每条数据。 -// 接受的context可以用来取消列举操作 -func (m *BucketManager) ListBucketContext(ctx context.Context, bucket, prefix, delimiter, marker string) (retCh chan listFilesRet2, err error) { - - vctx := context.WithValue(ctx, "mac", m.Mac) - reqHost, reqErr := m.RsfReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - // limit 0 ==> 列举所有文件 - reqURL := fmt.Sprintf("%s%s", reqHost, uriListFiles2(bucket, prefix, delimiter, marker)) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - retCh, err = m.Client.CallChan(vctx, "POST", reqURL, headers) - return -} - -type AsyncFetchParam struct { - Url string `json:"url"` - Host string `json:"host,omitempty"` - Bucket string `json:"bucket"` - Key string `json:"key,omitempty"` - Md5 string `json:"md5,omitempty"` - Etag string `json:"etag,omitempty"` - CallbackURL string `json:"callbackurl,omitempty"` - CallbackBody string `json:"callbackbody,omitempty"` - CallbackBodyType string `json:"callbackbodytype,omitempty"` - FileType int `json:"file_type,omitempty"` -} - -type AsyncFetchRet struct { - Id string `json:"id"` - Wait int `json:"wait"` -} - -func (m *BucketManager) AsyncFetch(param AsyncFetchParam) (ret AsyncFetchRet, err error) { - - reqUrl, err := m.ApiReqHost(param.Bucket) - if err != nil { - return - } - - reqUrl += "/sisyphus/fetch" - - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_JSON) - err = m.Client.CallWithJson(ctx, &ret, "POST", reqUrl, headers, param) - return -} - -func (m *BucketManager) RsHost(bucket string) (rsHost string, err error) { - zone, err := m.Zone(bucket) - if err != nil { - return - } - - rsHost = 
zone.GetRsHost(m.Cfg.UseHTTPS) - return -} - -func (m *BucketManager) RsfHost(bucket string) (rsfHost string, err error) { - zone, err := m.Zone(bucket) - if err != nil { - return - } - - rsfHost = zone.GetRsfHost(m.Cfg.UseHTTPS) - return -} - -func (m *BucketManager) IovipHost(bucket string) (iovipHost string, err error) { - zone, err := m.Zone(bucket) - if err != nil { - return - } - - iovipHost = zone.GetIoHost(m.Cfg.UseHTTPS) - return -} - -func (m *BucketManager) ApiHost(bucket string) (apiHost string, err error) { - zone, err := m.Zone(bucket) - if err != nil { - return - } - - apiHost = zone.GetApiHost(m.Cfg.UseHTTPS) - return -} - -func (m *BucketManager) Zone(bucket string) (z *Zone, err error) { - - if m.Cfg.Zone != nil { - z = m.Cfg.Zone - return - } - - z, err = GetZone(m.Mac.AccessKey, bucket) - return -} - -// 构建op的方法,导出的方法支持在Batch操作中使用 - -// URIStat 构建 stat 接口的请求命令 -func URIStat(bucket, key string) string { - return fmt.Sprintf("/stat/%s", EncodedEntry(bucket, key)) -} - -// URIDelete 构建 delete 接口的请求命令 -func URIDelete(bucket, key string) string { - return fmt.Sprintf("/delete/%s", EncodedEntry(bucket, key)) -} - -// URICopy 构建 copy 接口的请求命令 -func URICopy(srcBucket, srcKey, destBucket, destKey string, force bool) string { - return fmt.Sprintf("/copy/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey), - EncodedEntry(destBucket, destKey), force) -} - -// URIMove 构建 move 接口的请求命令 -func URIMove(srcBucket, srcKey, destBucket, destKey string, force bool) string { - return fmt.Sprintf("/move/%s/%s/force/%v", EncodedEntry(srcBucket, srcKey), - EncodedEntry(destBucket, destKey), force) -} - -// URIDeleteAfterDays 构建 deleteAfterDays 接口的请求命令 -func URIDeleteAfterDays(bucket, key string, days int) string { - return fmt.Sprintf("/deleteAfterDays/%s/%d", EncodedEntry(bucket, key), days) -} - -// URIChangeMime 构建 chgm 接口的请求命令 -func URIChangeMime(bucket, key, newMime string) string { - return fmt.Sprintf("/chgm/%s/mime/%s", EncodedEntry(bucket, key), - base64.URLEncoding.EncodeToString([]byte(newMime))) -} - -// URIChangeType 构建 chtype 接口的请求命令 -func URIChangeType(bucket, key string, fileType int) string { - return fmt.Sprintf("/chtype/%s/type/%d", EncodedEntry(bucket, key), fileType) -} - -// 构建op的方法,非导出的方法无法用在Batch操作中 -func uriFetch(resURL, bucket, key string) string { - return fmt.Sprintf("/fetch/%s/to/%s", - base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntry(bucket, key)) -} - -func uriFetchWithoutKey(resURL, bucket string) string { - return fmt.Sprintf("/fetch/%s/to/%s", - base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntryWithoutKey(bucket)) -} - -func uriPrefetch(bucket, key string) string { - return fmt.Sprintf("/prefetch/%s", EncodedEntry(bucket, key)) -} - -func uriSetImage(siteURL, bucket string) string { - return fmt.Sprintf("/image/%s/from/%s", bucket, - base64.URLEncoding.EncodeToString([]byte(siteURL))) -} - -func uriSetImageWithHost(siteURL, bucket, host string) string { - return fmt.Sprintf("/image/%s/from/%s/host/%s", bucket, - base64.URLEncoding.EncodeToString([]byte(siteURL)), - base64.URLEncoding.EncodeToString([]byte(host))) -} - -func uriUnsetImage(bucket string) string { - return fmt.Sprintf("/unimage/%s", bucket) -} - -func uriListFiles(bucket, prefix, delimiter, marker string, limit int) string { - query := make(url.Values) - query.Add("bucket", bucket) - if prefix != "" { - query.Add("prefix", prefix) - } - if delimiter != "" { - query.Add("delimiter", delimiter) - } - if marker != "" { - query.Add("marker", marker) - } - if limit > 0 { - 
query.Add("limit", strconv.FormatInt(int64(limit), 10)) - } - return fmt.Sprintf("/list?%s", query.Encode()) -} - -func uriListFiles2(bucket, prefix, delimiter, marker string) string { - query := make(url.Values) - query.Add("bucket", bucket) - if prefix != "" { - query.Add("prefix", prefix) - } - if delimiter != "" { - query.Add("delimiter", delimiter) - } - if marker != "" { - query.Add("marker", marker) - } - return fmt.Sprintf("/v2/list?%s", query.Encode()) -} - -// EncodedEntry 生成URL Safe Base64编码的 Entry -func EncodedEntry(bucket, key string) string { - entry := fmt.Sprintf("%s:%s", bucket, key) - return base64.URLEncoding.EncodeToString([]byte(entry)) -} - -// EncodedEntryWithoutKey 生成 key 为null的情况下 URL Safe Base64编码的Entry -func EncodedEntryWithoutKey(bucket string) string { - return base64.URLEncoding.EncodeToString([]byte(bucket)) -} - -// MakePublicURL 用来生成公开空间资源下载链接 -func MakePublicURL(domain, key string) (finalUrl string) { - domain = strings.TrimRight(domain, "/") - srcUrl := fmt.Sprintf("%s/%s", domain, key) - srcUri, _ := url.Parse(srcUrl) - finalUrl = srcUri.String() - return -} - -// MakePrivateURL 用来生成私有空间资源下载链接 -func MakePrivateURL(mac *qbox.Mac, domain, key string, deadline int64) (privateURL string) { - publicURL := MakePublicURL(domain, key) - urlToSign := publicURL - if strings.Contains(publicURL, "?") { - urlToSign = fmt.Sprintf("%s&e=%d", urlToSign, deadline) - } else { - urlToSign = fmt.Sprintf("%s?e=%d", urlToSign, deadline) - } - token := mac.Sign([]byte(urlToSign)) - privateURL = fmt.Sprintf("%s&token=%s", urlToSign, token) - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/config.go b/vendor/github.com/qiniu/api.v7/storage/config.go deleted file mode 100644 index 0c55081..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/config.go +++ /dev/null @@ -1,25 +0,0 @@ -package storage - -// Config 为文件上传,资源管理等配置 -type Config struct { - Zone *Zone //空间所在的机房 - UseHTTPS bool //是否使用https域名 - UseCdnDomains bool //是否使用cdn加速域名 - CentralRsHost string //中心机房的RsHost,用于list bucket - RsHost string - RsfHost string - UpHost string - ApiHost string - IoHost string -} - -func (c *Config) RsReqHost() string { - if c.RsHost == "" { - c.RsHost = DefaultRsHost - } - scheme := "http://" - if c.UseHTTPS { - scheme = "https://" - } - return scheme + c.RsHost -} diff --git a/vendor/github.com/qiniu/api.v7/storage/doc.go b/vendor/github.com/qiniu/api.v7/storage/doc.go deleted file mode 100644 index 13e3305..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// storage 包提供了资源的上传,管理,数据处理等功能。其中资源的上传又提供了表单上传的方式以及分片上传的方式,其中分片上传的方式还支持断点续传。 -// -// 该包中提供了 BucketManager 用来进行资源管理,比如获取文件信息,文件复制,删除,重命名等,以及很多高级功能如修改文件类型, -// 修改文件的生命周期,修改文件的存储类型等。 -// -// 该包中还提供了 FormUploader 和 ResumeUploader 来分别支持表单上传和分片上传,断点续传等功能,对于较大的文件,比如100MB以上的文件,一般 -// 建议采用分片上传的方式来保证上传的效率和可靠性。 -// -// 对于数据处理,则提供了 OperationManager,可以使用它来发送持久化数据处理请求,及查询数据处理的状态。 -package storage diff --git a/vendor/github.com/qiniu/api.v7/storage/form_upload.go b/vendor/github.com/qiniu/api.v7/storage/form_upload.go deleted file mode 100644 index 349fc7c..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/form_upload.go +++ /dev/null @@ -1,352 +0,0 @@ -package storage - -import ( - "bytes" - "context" - "fmt" - "hash" - "hash/crc32" - "io" - "mime/multipart" - "net/http" - "net/textproto" - "os" - "path" - "path/filepath" - "strings" -) - -// PutExtra 为表单上传的额外可选项 -type PutExtra struct { - // 可选,用户自定义参数,必须以 "x:" 开头。若不以x:开头,则忽略。 - Params map[string]string - - UpHost string - - // 可选,当为 "" 
时候,服务端自动判断。 - MimeType string - - // 上传事件:进度通知。这个事件的回调函数应该尽可能快地结束。 - OnProgress func(fsize, uploaded int64) -} - -// PutRet 为七牛标准的上传回复内容。 -// 如果使用了上传回调或者自定义了returnBody,那么需要根据实际情况,自己自定义一个返回值结构体 -type PutRet struct { - Hash string `json:"hash"` - PersistentID string `json:"persistentId"` - Key string `json:"key"` -} - -// FormUploader 表示一个表单上传的对象 -type FormUploader struct { - Client *Client - Cfg *Config -} - -// NewFormUploader 用来构建一个表单上传的对象 -func NewFormUploader(cfg *Config) *FormUploader { - if cfg == nil { - cfg = &Config{} - } - - return &FormUploader{ - Client: &DefaultClient, - Cfg: cfg, - } -} - -// NewFormUploaderEx 用来构建一个表单上传的对象 -func NewFormUploaderEx(cfg *Config, client *Client) *FormUploader { - if cfg == nil { - cfg = &Config{} - } - - if client == nil { - client = &DefaultClient - } - - return &FormUploader{ - Client: client, - Cfg: cfg, - } -} - -// PutFile 用来以表单方式上传一个文件,和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.Reader 来访问。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 callbackUrl 或 returnBody,那么返回的数据结构是 PutRet 结构。 -// uptoken 是由业务服务器颁发的上传凭证。 -// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 -// localFile 是要上传的文件的本地路径。 -// extra 是上传的一些可选项,可以指定为nil。详细见 PutExtra 结构的描述。 -// -func (p *FormUploader) PutFile( - ctx context.Context, ret interface{}, uptoken, key, localFile string, extra *PutExtra) (err error) { - return p.putFile(ctx, ret, uptoken, key, true, localFile, extra) -} - -// PutFileWithoutKey 用来以表单方式上传一个文件。不指定文件上传后保存的key的情况下,文件命名方式首先看看 -// uptoken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 -// 和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.Reader 来访问。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// uptoken 是由业务服务器颁发的上传凭证。 -// localFile 是要上传的文件的本地路径。 -// extra 是上传的一些可选项。可以指定为nil。详细见 PutExtra 结构的描述。 -// -func (p *FormUploader) PutFileWithoutKey( - ctx context.Context, ret interface{}, uptoken, localFile string, extra *PutExtra) (err error) { - return p.putFile(ctx, ret, uptoken, "", false, localFile, extra) -} - -func (p *FormUploader) putFile( - ctx context.Context, ret interface{}, uptoken string, - key string, hasKey bool, localFile string, extra *PutExtra) (err error) { - - f, err := os.Open(localFile) - if err != nil { - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return - } - fsize := fi.Size() - - if extra == nil { - extra = &PutExtra{} - } - - return p.put(ctx, ret, uptoken, key, hasKey, f, fsize, extra, filepath.Base(localFile)) -} - -// Put 用来以表单方式上传一个文件。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 callbackUrl 或 returnBody,那么返回的数据结构是 PutRet 结构。 -// uptoken 是由业务服务器颁发的上传凭证。 -// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 -// data 是文件内容的访问接口(io.Reader)。 -// fsize 是要上传的文件大小。 -// extra 是上传的一些可选项。可以指定为nil。详细见 PutExtra 结构的描述。 -// -func (p *FormUploader) Put( - ctx context.Context, ret interface{}, uptoken, key string, data io.Reader, size int64, extra *PutExtra) (err error) { - err = p.put(ctx, ret, uptoken, key, true, data, size, extra, path.Base(key)) - return -} - -// PutWithoutKey 用来以表单方式上传一个文件。不指定文件上传后保存的key的情况下,文件命名方式首先看看 uptoken 中是否设置了 saveKey, -// 如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 uptoken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// uptoken 是由业务服务器颁发的上传凭证。 -// data 是文件内容的访问接口(io.Reader)。 -// fsize 是要上传的文件大小。 -// extra 是上传的一些可选项。详细见 PutExtra 结构的描述。 -// -func (p *FormUploader) PutWithoutKey( - ctx 
context.Context, ret interface{}, uptoken string, data io.Reader, size int64, extra *PutExtra) (err error) { - err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "filename") - return err -} - -func (p *FormUploader) put( - ctx context.Context, ret interface{}, uptoken string, - key string, hasKey bool, data io.Reader, size int64, extra *PutExtra, fileName string) (err error) { - - var upHost string - if extra.UpHost != "" { - upHost = extra.UpHost - } else { - ak, bucket, gErr := getAkBucketFromUploadToken(uptoken) - if gErr != nil { - err = gErr - return - } - - upHost, err = p.UpHost(ak, bucket) - if err != nil { - return - } - } - - var b bytes.Buffer - writer := multipart.NewWriter(&b) - - if extra == nil { - extra = &PutExtra{} - } - - if extra.OnProgress != nil { - data = &readerWithProgress{reader: data, fsize: size, onProgress: extra.OnProgress} - } - - err = writeMultipart(writer, uptoken, key, hasKey, extra, fileName) - if err != nil { - return - } - - var dataReader io.Reader - - h := crc32.NewIEEE() - dataReader = io.TeeReader(data, h) - crcReader := newCrc32Reader(writer.Boundary(), h) - //write file - head := make(textproto.MIMEHeader) - head.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, - escapeQuotes(fileName))) - if extra.MimeType != "" { - head.Set("Content-Type", extra.MimeType) - } - - _, err = writer.CreatePart(head) - if err != nil { - return - } - - lastLine := fmt.Sprintf("\r\n--%s--\r\n", writer.Boundary()) - r := strings.NewReader(lastLine) - - bodyLen := int64(-1) - if size >= 0 { - bodyLen = int64(b.Len()) + size + int64(len(lastLine)) - bodyLen += crcReader.length() - } - - mr := io.MultiReader(&b, dataReader, crcReader, r) - - contentType := writer.FormDataContentType() - headers := http.Header{} - headers.Add("Content-Type", contentType) - err = p.Client.CallWith64(ctx, ret, "POST", upHost, headers, mr, bodyLen) - if err != nil { - return - } - if extra.OnProgress != nil { - extra.OnProgress(size, size) - } - - return -} - -type crc32Reader struct { - h hash.Hash32 - boundary string - r io.Reader - flag bool - nlDashBoundaryNl string - header string - crc32PadLen int64 -} - -func newCrc32Reader(boundary string, h hash.Hash32) *crc32Reader { - nlDashBoundaryNl := fmt.Sprintf("\r\n--%s\r\n", boundary) - header := `Content-Disposition: form-data; name="crc32"` + "\r\n\r\n" - return &crc32Reader{ - h: h, - boundary: boundary, - nlDashBoundaryNl: nlDashBoundaryNl, - header: header, - crc32PadLen: 10, - } -} - -func (r *crc32Reader) Read(p []byte) (int, error) { - if r.flag == false { - crc32Sum := r.h.Sum32() - crc32Line := r.nlDashBoundaryNl + r.header + fmt.Sprintf("%010d", crc32Sum) //padding crc32 results to 10 digits - r.r = strings.NewReader(crc32Line) - r.flag = true - } - return r.r.Read(p) -} - -func (r crc32Reader) length() (length int64) { - return int64(len(r.nlDashBoundaryNl+r.header)) + r.crc32PadLen -} - -func (p *FormUploader) UpHost(ak, bucket string) (upHost string, err error) { - var zone *Zone - if p.Cfg.Zone != nil { - zone = p.Cfg.Zone - } else { - if v, zoneErr := GetZone(ak, bucket); zoneErr != nil { - err = zoneErr - return - } else { - zone = v - } - } - - scheme := "http://" - if p.Cfg.UseHTTPS { - scheme = "https://" - } - - host := zone.SrcUpHosts[0] - if p.Cfg.UseCdnDomains { - host = zone.CdnUpHosts[0] - } - - upHost = fmt.Sprintf("%s%s", scheme, host) - return -} - -type readerWithProgress struct { - reader io.Reader - uploaded int64 - fsize int64 - onProgress func(fsize, uploaded int64) 
-} - -func (p *readerWithProgress) Read(b []byte) (n int, err error) { - if p.uploaded > 0 { - p.onProgress(p.fsize, p.uploaded) - } - - n, err = p.reader.Read(b) - p.uploaded += int64(n) - return -} - -func writeMultipart(writer *multipart.Writer, uptoken, key string, hasKey bool, - extra *PutExtra, fileName string) (err error) { - - //token - if err = writer.WriteField("token", uptoken); err != nil { - return - } - - //key - if hasKey { - if err = writer.WriteField("key", key); err != nil { - return - } - } - - //extra.Params - if extra.Params != nil { - for k, v := range extra.Params { - if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { - err = writer.WriteField(k, v) - if err != nil { - return - } - } - } - } - - return err -} - -var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") - -func escapeQuotes(s string) string { - return quoteEscaper.Replace(s) -} diff --git a/vendor/github.com/qiniu/api.v7/storage/pfop.go b/vendor/github.com/qiniu/api.v7/storage/pfop.go deleted file mode 100644 index 361bacf..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/pfop.go +++ /dev/null @@ -1,211 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "github.com/qiniu/api.v7/auth/qbox" - "github.com/qiniu/api.v7/conf" - "net/http" -) - -// OperationManager 提供了数据处理相关的方法 -type OperationManager struct { - Client *Client - Mac *qbox.Mac - Cfg *Config -} - -// NewOperationManager 用来构建一个新的数据处理对象 -func NewOperationManager(mac *qbox.Mac, cfg *Config) *OperationManager { - if cfg == nil { - cfg = &Config{} - } - - return &OperationManager{ - Client: &DefaultClient, - Mac: mac, - Cfg: cfg, - } -} - -// NewOperationManager 用来构建一个新的数据处理对象 -func NewOperationManagerEx(mac *qbox.Mac, cfg *Config, client *Client) *OperationManager { - if cfg == nil { - cfg = &Config{} - } - - if client == nil { - client = &DefaultClient - } - - return &OperationManager{ - Client: client, - Mac: mac, - Cfg: cfg, - } -} - -// PfopRet 为数据处理请求的回复内容 -type PfopRet struct { - PersistentID string `json:"persistentId,omitempty"` -} - -// PrefopRet 为数据处理请求的状态查询回复内容 -type PrefopRet struct { - ID string `json:"id"` - Code int `json:"code"` - Desc string `json:"desc"` - InputBucket string `json:"inputBucket,omitempty"` - InputKey string `json:"inputKey,omitempty"` - Pipeline string `json:"pipeline,omitempty"` - Reqid string `json:"reqid,omitempty"` - Items []FopResult -} - -func (r *PrefopRet) String() string { - strData := fmt.Sprintf("Id: %s\r\nCode: %d\r\nDesc: %s\r\n", r.ID, r.Code, r.Desc) - if r.InputBucket != "" { - strData += fmt.Sprintln(fmt.Sprintf("InputBucket: %s", r.InputBucket)) - } - if r.InputKey != "" { - strData += fmt.Sprintln(fmt.Sprintf("InputKey: %s", r.InputKey)) - } - if r.Pipeline != "" { - strData += fmt.Sprintln(fmt.Sprintf("Pipeline: %s", r.Pipeline)) - } - if r.Reqid != "" { - strData += fmt.Sprintln(fmt.Sprintf("Reqid: %s", r.Reqid)) - } - - strData = fmt.Sprintln(strData) - for _, item := range r.Items { - strData += fmt.Sprintf("\tCmd:\t%s\r\n\tCode:\t%d\r\n\tDesc:\t%s\r\n", item.Cmd, item.Code, item.Desc) - if item.Error != "" { - strData += fmt.Sprintf("\tError:\t%s\r\n", item.Error) - } else { - if item.Hash != "" { - strData += fmt.Sprintf("\tHash:\t%s\r\n", item.Hash) - } - if item.Key != "" { - strData += fmt.Sprintf("\tKey:\t%s\r\n", item.Key) - } - if item.Keys != nil { - if len(item.Keys) > 0 { - strData += "\tKeys: {\r\n" - for _, key := range item.Keys { - strData += fmt.Sprintf("\t\t%s\r\n", key) - } - strData += "\t}\r\n" - } - } - } - strData += 
"\r\n" - } - return strData -} - -// FopResult 云处理操作列表,包含每个云处理操作的状态信息 -type FopResult struct { - Cmd string `json:"cmd"` - Code int `json:"code"` - Desc string `json:"desc"` - Error string `json:"error,omitempty"` - Hash string `json:"hash,omitempty"` - Key string `json:"key,omitempty"` - Keys []string `json:"keys,omitempty"` -} - -// Pfop 持久化数据处理 -// -// bucket 资源空间 -// key 源资源名 -// fops 云处理操作列表, -// notifyURL 处理结果通知接收URL -// pipeline 多媒体处理队列名称 -// force 强制执行数据处理 -// -func (m *OperationManager) Pfop(bucket, key, fops, pipeline, notifyURL string, - force bool) (persistentID string, err error) { - pfopParams := map[string][]string{ - "bucket": []string{bucket}, - "key": []string{key}, - "fops": []string{fops}, - } - - if pipeline != "" { - pfopParams["pipeline"] = []string{pipeline} - } - - if notifyURL != "" { - pfopParams["notifyURL"] = []string{notifyURL} - } - - if force { - pfopParams["force"] = []string{"1"} - } - var ret PfopRet - ctx := context.WithValue(context.TODO(), "mac", m.Mac) - reqHost, reqErr := m.ApiHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s/pfop/", reqHost) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.CallWithForm(ctx, &ret, "POST", reqURL, headers, pfopParams) - if err != nil { - return - } - - persistentID = ret.PersistentID - return -} - -// Prefop 持久化处理状态查询 -func (m *OperationManager) Prefop(persistentID string) (ret PrefopRet, err error) { - ctx := context.TODO() - reqHost := m.PrefopApiHost(persistentID) - reqURL := fmt.Sprintf("%s/status/get/prefop?id=%s", reqHost, persistentID) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_FORM) - err = m.Client.Call(ctx, &ret, "GET", reqURL, headers) - return -} - -func (m *OperationManager) ApiHost(bucket string) (apiHost string, err error) { - var zone *Zone - if m.Cfg.Zone != nil { - zone = m.Cfg.Zone - } else { - if v, zoneErr := GetZone(m.Mac.AccessKey, bucket); zoneErr != nil { - err = zoneErr - return - } else { - zone = v - } - } - - scheme := "http://" - if m.Cfg.UseHTTPS { - scheme = "https://" - } - apiHost = fmt.Sprintf("%s%s", scheme, zone.ApiHost) - - return -} - -func (m *OperationManager) PrefopApiHost(persistentID string) (apiHost string) { - apiHost = "api.qiniu.com" - if m.Cfg.Zone != nil { - apiHost = m.Cfg.Zone.ApiHost - } - - if m.Cfg.UseHTTPS { - apiHost = fmt.Sprintf("https://%s", apiHost) - } else { - apiHost = fmt.Sprintf("http://%s", apiHost) - } - - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/resume_base.go b/vendor/github.com/qiniu/api.v7/storage/resume_base.go deleted file mode 100644 index d87316d..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/resume_base.go +++ /dev/null @@ -1,187 +0,0 @@ -package storage - -import ( - "context" - "encoding/base64" - "fmt" - "hash/crc32" - "io" - "net/http" - "strconv" - "strings" - - "github.com/qiniu/api.v7/conf" - "github.com/qiniu/x/bytes.v7" - "github.com/qiniu/x/xlog.v7" -) - -// ResumeUploader 表示一个分片上传的对象 -type ResumeUploader struct { - Client *Client - Cfg *Config -} - -// NewResumeUploader 表示构建一个新的分片上传的对象 -func NewResumeUploader(cfg *Config) *ResumeUploader { - if cfg == nil { - cfg = &Config{} - } - - return &ResumeUploader{ - Cfg: cfg, - Client: &DefaultClient, - } -} - -// NewResumeUploaderEx 表示构建一个新的分片上传的对象 -func NewResumeUploaderEx(cfg *Config, client *Client) *ResumeUploader { - if cfg == nil { - cfg = &Config{} - } - - if client == nil { - client = &DefaultClient - } - - return &ResumeUploader{ - 
Client: client, - Cfg: cfg, - } -} - -// 创建块请求 -func (p *ResumeUploader) Mkblk( - ctx context.Context, upToken string, upHost string, ret *BlkputRet, blockSize int, body io.Reader, size int) error { - - reqUrl := upHost + "/mkblk/" + strconv.Itoa(blockSize) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET) - headers.Add("Authorization", "UpToken "+upToken) - - return p.Client.CallWith(ctx, ret, "POST", reqUrl, headers, body, size) -} - -// 发送bput请求 -func (p *ResumeUploader) Bput( - ctx context.Context, upToken string, ret *BlkputRet, body io.Reader, size int) error { - - reqUrl := ret.Host + "/bput/" + ret.Ctx + "/" + strconv.FormatUint(uint64(ret.Offset), 10) - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET) - headers.Add("Authorization", "UpToken "+upToken) - - return p.Client.CallWith(ctx, ret, "POST", reqUrl, headers, body, size) -} - -// 分片上传请求 -func (p *ResumeUploader) resumableBput( - ctx context.Context, upToken string, upHost string, ret *BlkputRet, f io.ReaderAt, blkIdx, blkSize int, extra *RputExtra) (err error) { - - log := xlog.NewWith(ctx) - h := crc32.NewIEEE() - offbase := int64(blkIdx) << blockBits - chunkSize := extra.ChunkSize - - var bodyLength int - - if ret.Ctx == "" { - - if chunkSize < blkSize { - bodyLength = chunkSize - } else { - bodyLength = blkSize - } - - body1 := io.NewSectionReader(f, offbase, int64(bodyLength)) - body := io.TeeReader(body1, h) - - err = p.Mkblk(ctx, upToken, upHost, ret, blkSize, body, bodyLength) - if err != nil { - return - } - if ret.Crc32 != h.Sum32() || int(ret.Offset) != bodyLength { - err = ErrUnmatchedChecksum - return - } - extra.Notify(blkIdx, blkSize, ret) - } - - for int(ret.Offset) < blkSize { - - if chunkSize < blkSize-int(ret.Offset) { - bodyLength = chunkSize - } else { - bodyLength = blkSize - int(ret.Offset) - } - - tryTimes := extra.TryTimes - - lzRetry: - h.Reset() - body1 := io.NewSectionReader(f, offbase+int64(ret.Offset), int64(bodyLength)) - body := io.TeeReader(body1, h) - - err = p.Bput(ctx, upToken, ret, body, bodyLength) - if err == nil { - if ret.Crc32 == h.Sum32() { - extra.Notify(blkIdx, blkSize, ret) - continue - } - log.Warn("ResumableBlockput: invalid checksum, retry") - err = ErrUnmatchedChecksum - } else { - if ei, ok := err.(*ErrorInfo); ok && ei.Code == InvalidCtx { - ret.Ctx = "" // reset - log.Warn("ResumableBlockput: invalid ctx, please retry") - return - } - log.Warn("ResumableBlockput: bput failed -", err) - } - if tryTimes > 1 { - tryTimes-- - log.Info("ResumableBlockput retrying ...") - goto lzRetry - } - break - } - return -} - -// 创建文件请求 -func (p *ResumeUploader) Mkfile( - ctx context.Context, upToken string, upHost string, ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra) (err error) { - - url := upHost + "/mkfile/" + strconv.FormatInt(fsize, 10) - - if extra.MimeType != "" { - url += "/mimeType/" + encode(extra.MimeType) - } - if hasKey { - url += "/key/" + encode(key) - } - for k, v := range extra.Params { - if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { - url += fmt.Sprintf("/%s/%s", k, encode(v)) - } - } - - buf := make([]byte, 0, 196*len(extra.Progresses)) - for _, prog := range extra.Progresses { - buf = append(buf, prog.Ctx...) 
- buf = append(buf, ',') - } - if len(buf) > 0 { - buf = buf[:len(buf)-1] - } - - headers := http.Header{} - headers.Add("Content-Type", conf.CONTENT_TYPE_OCTET) - headers.Add("Authorization", "UpToken "+upToken) - - return p.Client.CallWith( - ctx, ret, "POST", url, headers, bytes.NewReader(buf), len(buf)) -} - -func encode(raw string) string { - return base64.URLEncoding.EncodeToString([]byte(raw)) -} diff --git a/vendor/github.com/qiniu/api.v7/storage/resume_upload.go b/vendor/github.com/qiniu/api.v7/storage/resume_upload.go deleted file mode 100644 index b760e6e..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/resume_upload.go +++ /dev/null @@ -1,316 +0,0 @@ -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "sync" - - "github.com/qiniu/x/xlog.v7" -) - -// 分片上传过程中可能遇到的错误 -var ( - ErrInvalidPutProgress = errors.New("invalid put progress") - ErrPutFailed = errors.New("resumable put failed") - ErrUnmatchedChecksum = errors.New("unmatched checksum") - ErrBadToken = errors.New("invalid token") -) - -// 上传进度过期错误 -const ( - InvalidCtx = 701 // UP: 无效的上下文(bput),可能情况:Ctx非法或者已经被淘汰(太久未使用) -) - -// 分片上传默认参数设置 -const ( - defaultWorkers = 4 // 默认的并发上传的块数量 - defaultChunkSize = 4 * 1024 * 1024 // 默认的分片大小,4MB - defaultTryTimes = 3 // bput 失败重试次数 -) - -// Settings 为分片上传设置 -type Settings struct { - TaskQsize int // 可选。任务队列大小。为 0 表示取 Workers * 4。 - Workers int // 并行 Goroutine 数目。 - ChunkSize int // 默认的Chunk大小,不设定则为4M - TryTimes int // 默认的尝试次数,不设定则为3 -} - -// 分片上传的默认设置 -var settings = Settings{ - TaskQsize: defaultWorkers * 4, - Workers: defaultWorkers, - ChunkSize: defaultChunkSize, - TryTimes: defaultTryTimes, -} - -// SetSettings 可以用来设置分片上传参数 -func SetSettings(v *Settings) { - settings = *v - if settings.Workers == 0 { - settings.Workers = defaultWorkers - } - if settings.TaskQsize == 0 { - settings.TaskQsize = settings.Workers * 4 - } - if settings.ChunkSize == 0 { - settings.ChunkSize = defaultChunkSize - } - if settings.TryTimes == 0 { - settings.TryTimes = defaultTryTimes - } -} - -var tasks chan func() - -func worker(tasks chan func()) { - for { - task := <-tasks - task() - } -} -func initWorkers() { - tasks = make(chan func(), settings.TaskQsize) - for i := 0; i < settings.Workers; i++ { - go worker(tasks) - } -} - -// 上传完毕块之后的回调 -func notifyNil(blkIdx int, blkSize int, ret *BlkputRet) {} -func notifyErrNil(blkIdx int, blkSize int, err error) {} - -const ( - blockBits = 22 - blockMask = (1 << blockBits) - 1 -) - -// BlockCount 用来计算文件的分块数量 -func BlockCount(fsize int64) int { - return int((fsize + blockMask) >> blockBits) -} - -// BlkputRet 表示分片上传每个片上传完毕的返回值 -type BlkputRet struct { - Ctx string `json:"ctx"` - Checksum string `json:"checksum"` - Crc32 uint32 `json:"crc32"` - Offset uint32 `json:"offset"` - Host string `json:"host"` - ExpiredAt int64 `json:"expired_at"` -} - -// RputExtra 表示分片上传额外可以指定的参数 -type RputExtra struct { - Params map[string]string // 可选。用户自定义参数,以"x:"开头,而且值不能为空,否则忽略 - UpHost string - MimeType string // 可选。 - ChunkSize int // 可选。每次上传的Chunk大小 - TryTimes int // 可选。尝试次数 - Progresses []BlkputRet // 可选。上传进度 - Notify func(blkIdx int, blkSize int, ret *BlkputRet) // 可选。进度提示(注意多个block是并行传输的) - NotifyErr func(blkIdx int, blkSize int, err error) -} - -var once sync.Once - -// Put 方法用来上传一个文件,支持断点续传和分块上传。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// upToken 是由业务服务器颁发的上传凭证。 -// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 -// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 
接口,而不是 io.Reader。 -// fsize 是要上传的文件大小。 -// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 -// -func (p *ResumeUploader) Put(ctx context.Context, ret interface{}, upToken string, key string, f io.ReaderAt, - fsize int64, extra *RputExtra) (err error) { - err = p.rput(ctx, ret, upToken, key, true, f, fsize, extra) - return -} - -// PutWithoutKey 方法用来上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 -// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// upToken 是由业务服务器颁发的上传凭证。 -// f 是文件内容的访问接口。考虑到需要支持分块上传和断点续传,要的是 io.ReaderAt 接口,而不是 io.Reader。 -// fsize 是要上传的文件大小。 -// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 -// -func (p *ResumeUploader) PutWithoutKey( - ctx context.Context, ret interface{}, upToken string, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) { - err = p.rput(ctx, ret, upToken, "", false, f, fsize, extra) - return -} - -// PutFile 用来上传一个文件,支持断点续传和分块上传。 -// 和 Put 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// upToken 是由业务服务器颁发的上传凭证。 -// key 是要上传的文件访问路径。比如:"foo/bar.jpg"。注意我们建议 key 不要以 '/' 开头。另外,key 为空字符串是合法的。 -// localFile 是要上传的文件的本地路径。 -// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 -// -func (p *ResumeUploader) PutFile( - ctx context.Context, ret interface{}, upToken, key, localFile string, extra *RputExtra) (err error) { - err = p.rputFile(ctx, ret, upToken, key, true, localFile, extra) - return -} - -// PutFileWithoutKey 上传一个文件,支持断点续传和分块上传。文件命名方式首先看看 -// upToken 中是否设置了 saveKey,如果设置了 saveKey,那么按 saveKey 要求的规则生成 key,否则自动以文件的 hash 做 key。 -// 和 PutWithoutKey 不同的只是一个通过提供文件路径来访问文件内容,一个通过 io.ReaderAt 来访问。 -// -// ctx 是请求的上下文。 -// ret 是上传成功后返回的数据。如果 upToken 中没有设置 CallbackUrl 或 ReturnBody,那么返回的数据结构是 PutRet 结构。 -// upToken 是由业务服务器颁发的上传凭证。 -// localFile 是要上传的文件的本地路径。 -// extra 是上传的一些可选项。详细见 RputExtra 结构的描述。 -// -func (p *ResumeUploader) PutFileWithoutKey( - ctx context.Context, ret interface{}, upToken, localFile string, extra *RputExtra) (err error) { - return p.rputFile(ctx, ret, upToken, "", false, localFile, extra) -} - -func (p *ResumeUploader) rput( - ctx context.Context, ret interface{}, upToken string, - key string, hasKey bool, f io.ReaderAt, fsize int64, extra *RputExtra) (err error) { - - once.Do(initWorkers) - - log := xlog.NewWith(ctx) - blockCnt := BlockCount(fsize) - - if extra == nil { - extra = new(RputExtra) - } - if extra.Progresses == nil { - extra.Progresses = make([]BlkputRet, blockCnt) - } else if len(extra.Progresses) != blockCnt { - return ErrInvalidPutProgress - } - - if extra.ChunkSize == 0 { - extra.ChunkSize = settings.ChunkSize - } - if extra.TryTimes == 0 { - extra.TryTimes = settings.TryTimes - } - if extra.Notify == nil { - extra.Notify = notifyNil - } - if extra.NotifyErr == nil { - extra.NotifyErr = notifyErrNil - } - //get up host - - var upHost string - if extra.UpHost != "" { - upHost = extra.UpHost - } else { - ak, bucket, gErr := getAkBucketFromUploadToken(upToken) - if gErr != nil { - err = gErr - return - } - - upHost, gErr = p.UpHost(ak, bucket) - if gErr != nil { - err = gErr - return - } - } - - var wg sync.WaitGroup - wg.Add(blockCnt) - - last := blockCnt - 1 - blkSize := 1 << blockBits - nfails := 0 - - for i := 0; i < blockCnt; i++ { - blkIdx := i - blkSize1 := blkSize - if i == last { - offbase := int64(blkIdx) << blockBits - blkSize1 = int(fsize - offbase) - } - task := func() { - defer wg.Done() - tryTimes := extra.TryTimes - 
lzRetry: - err := p.resumableBput(ctx, upToken, upHost, &extra.Progresses[blkIdx], f, blkIdx, blkSize1, extra) - if err != nil { - if tryTimes > 1 { - tryTimes-- - log.Info("resumable.Put retrying ...", blkIdx, "reason:", err) - goto lzRetry - } - log.Warn("resumable.Put", blkIdx, "failed:", err) - extra.NotifyErr(blkIdx, blkSize1, err) - nfails++ - } - } - tasks <- task - } - - wg.Wait() - if nfails != 0 { - return ErrPutFailed - } - - return p.Mkfile(ctx, upToken, upHost, ret, key, hasKey, fsize, extra) -} - -func (p *ResumeUploader) rputFile( - ctx context.Context, ret interface{}, upToken string, - key string, hasKey bool, localFile string, extra *RputExtra) (err error) { - - f, err := os.Open(localFile) - if err != nil { - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return - } - - return p.rput(ctx, ret, upToken, key, hasKey, f, fi.Size(), extra) -} - -func (p *ResumeUploader) UpHost(ak, bucket string) (upHost string, err error) { - var zone *Zone - if p.Cfg.Zone != nil { - zone = p.Cfg.Zone - } else { - if v, zoneErr := GetZone(ak, bucket); zoneErr != nil { - err = zoneErr - return - } else { - zone = v - } - } - - scheme := "http://" - if p.Cfg.UseHTTPS { - scheme = "https://" - } - - host := zone.SrcUpHosts[0] - if p.Cfg.UseCdnDomains { - host = zone.CdnUpHosts[0] - } - - upHost = fmt.Sprintf("%s%s", scheme, host) - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/rpc.go b/vendor/github.com/qiniu/api.v7/storage/rpc.go deleted file mode 100644 index 5a4526e..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/rpc.go +++ /dev/null @@ -1,395 +0,0 @@ -package storage - -// The original library rpc.v7 logic in github.com/qiniu/x has its own bugs -// under the concurrent http calls, we make a fork of the library and fix -// the bug -import ( - // "bufio" - "bytes" - "encoding/json" - "fmt" - "github.com/qiniu/api.v7/auth/qbox" - "github.com/qiniu/api.v7/conf" - "github.com/qiniu/x/reqid.v7" - . 
"golang.org/x/net/context" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "runtime" - "strings" -) - -var UserAgent = "Golang qiniu/rpc package" -var DefaultClient = Client{&http.Client{Transport: http.DefaultTransport}} - -// -------------------------------------------------------------------- - -type Client struct { - *http.Client -} - -// userApp should be [A-Za-z0-9_\ \-\.]* -func SetAppName(userApp string) error { - UserAgent = fmt.Sprintf( - "QiniuGo/%s (%s; %s; %s) %s", conf.Version, runtime.GOOS, runtime.GOARCH, userApp, runtime.Version()) - return nil -} - -// -------------------------------------------------------------------- - -func newRequest(ctx Context, method, reqUrl string, headers http.Header, body io.Reader) (req *http.Request, err error) { - req, err = http.NewRequest(method, reqUrl, body) - if err != nil { - return - } - - if headers == nil { - headers = http.Header{} - } - - req.Header = headers - - //check access token - mac, ok := ctx.Value("mac").(*qbox.Mac) - if ok { - token, signErr := mac.SignRequest(req) - if signErr != nil { - err = signErr - return - } - req.Header.Add("Authorization", "QBox "+token) - } - - return -} - -func (r Client) DoRequest(ctx Context, method, reqUrl string, headers http.Header) (resp *http.Response, err error) { - req, err := newRequest(ctx, method, reqUrl, headers, nil) - if err != nil { - return - } - return r.Do(ctx, req) -} - -func (r Client) DoRequestWith(ctx Context, method, reqUrl string, headers http.Header, body io.Reader, - bodyLength int) (resp *http.Response, err error) { - - req, err := newRequest(ctx, method, reqUrl, headers, body) - if err != nil { - return - } - req.ContentLength = int64(bodyLength) - return r.Do(ctx, req) -} - -func (r Client) DoRequestWith64(ctx Context, method, reqUrl string, headers http.Header, body io.Reader, - bodyLength int64) (resp *http.Response, err error) { - - req, err := newRequest(ctx, method, reqUrl, headers, body) - if err != nil { - return - } - req.ContentLength = bodyLength - return r.Do(ctx, req) -} - -func (r Client) DoRequestWithForm(ctx Context, method, reqUrl string, headers http.Header, - data map[string][]string) (resp *http.Response, err error) { - - if headers == nil { - headers = http.Header{} - } - headers.Add("Content-Type", "application/x-www-form-urlencoded") - - requestData := url.Values(data).Encode() - if method == "GET" || method == "HEAD" || method == "DELETE" { - if strings.ContainsRune(reqUrl, '?') { - reqUrl += "&" - } else { - reqUrl += "?" 
- } - return r.DoRequest(ctx, method, reqUrl+requestData, headers) - } - - return r.DoRequestWith(ctx, method, reqUrl, headers, strings.NewReader(requestData), len(requestData)) -} - -func (r Client) DoRequestWithJson(ctx Context, method, reqUrl string, headers http.Header, - data interface{}) (resp *http.Response, err error) { - - reqBody, err := json.Marshal(data) - if err != nil { - return - } - - if headers == nil { - headers = http.Header{} - } - headers.Add("Content-Type", "application/json") - return r.DoRequestWith(ctx, method, reqUrl, headers, bytes.NewReader(reqBody), len(reqBody)) -} - -func (r Client) Do(ctx Context, req *http.Request) (resp *http.Response, err error) { - - if ctx == nil { - ctx = Background() - } - - if reqId, ok := reqid.FromContext(ctx); ok { - req.Header.Set("X-Reqid", reqId) - } - - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", UserAgent) - } - - transport := r.Transport // don't change r.Transport - if transport == nil { - transport = http.DefaultTransport - } - - // avoid cancel() is called before Do(req), but isn't accurate - select { - case <-ctx.Done(): - err = ctx.Err() - return - default: - } - - if tr, ok := getRequestCanceler(transport); ok { - // support CancelRequest - reqC := make(chan bool, 1) - go func() { - resp, err = r.Client.Do(req) - reqC <- true - }() - select { - case <-reqC: - case <-ctx.Done(): - tr.CancelRequest(req) - <-reqC - err = ctx.Err() - } - } else { - resp, err = r.Client.Do(req) - } - return -} - -// -------------------------------------------------------------------- - -type ErrorInfo struct { - Err string `json:"error,omitempty"` - Key string `json:"key,omitempty"` - Reqid string `json:"reqid,omitempty"` - Errno int `json:"errno,omitempty"` - Code int `json:"code"` -} - -func (r *ErrorInfo) ErrorDetail() string { - - msg, _ := json.Marshal(r) - return string(msg) -} - -func (r *ErrorInfo) Error() string { - - return r.Err -} - -func (r *ErrorInfo) RpcError() (code, errno int, key, err string) { - - return r.Code, r.Errno, r.Key, r.Err -} - -func (r *ErrorInfo) HttpCode() int { - - return r.Code -} - -// -------------------------------------------------------------------- - -func parseError(e *ErrorInfo, r io.Reader) { - - body, err1 := ioutil.ReadAll(r) - if err1 != nil { - e.Err = err1.Error() - return - } - - var ret struct { - Err string `json:"error"` - Key string `json:"key"` - Errno int `json:"errno"` - } - if json.Unmarshal(body, &ret) == nil && ret.Err != "" { - // qiniu error msg style returns here - e.Err, e.Key, e.Errno = ret.Err, ret.Key, ret.Errno - return - } - e.Err = string(body) -} - -func ResponseError(resp *http.Response) (err error) { - - e := &ErrorInfo{ - Reqid: resp.Header.Get("X-Reqid"), - Code: resp.StatusCode, - } - if resp.StatusCode > 299 { - if resp.ContentLength != 0 { - ct, ok := resp.Header["Content-Type"] - if ok && strings.HasPrefix(ct[0], "application/json") { - parseError(e, resp.Body) - } - } - } - return e -} - -func CallRetChan(ctx Context, resp *http.Response) (retCh chan listFilesRet2, err error) { - - retCh = make(chan listFilesRet2) - if resp.StatusCode/100 != 2 { - return nil, ResponseError(resp) - } - - go func() { - defer resp.Body.Close() - defer close(retCh) - - dec := json.NewDecoder(resp.Body) - var ret listFilesRet2 - - for { - err = dec.Decode(&ret) - if err != nil { - if err != io.EOF { - fmt.Fprintf(os.Stderr, "decode error: %v\n", err) - } - return - } - select { - case <-ctx.Done(): - return - case retCh <- ret: - } - } - }() - return -} 
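// ---- Editor's note (annotation, not part of the original patch) ------------
// A minimal usage sketch of this forked rpc client, mirroring how
// OperationManager.Pfop drives it elsewhere in the storage package: a request
// is signed only when the context carries a *qbox.Mac under the "mac" key,
// which is exactly what newRequest checks for. ak, sk, reqURL and params below
// are illustrative placeholders (params is a map[string][]string of form
// fields), and qbox.NewMac is the constructor assumed from the qbox package,
// which is not shown in this patch.
//
//	mac := qbox.NewMac(ak, sk)
//	ctx := context.WithValue(context.TODO(), "mac", mac)
//	headers := http.Header{}
//	headers.Add("Content-Type", conf.CONTENT_TYPE_FORM)
//	var ret PfopRet
//	err := DefaultClient.CallWithForm(ctx, &ret, "POST", reqURL, headers, params)
// -----------------------------------------------------------------------------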
- -func CallRet(ctx Context, ret interface{}, resp *http.Response) (err error) { - - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - - if resp.StatusCode/100 == 2 { - if ret != nil && resp.ContentLength != 0 { - err = json.NewDecoder(resp.Body).Decode(ret) - if err != nil { - return - } - } - if resp.StatusCode == 200 { - return nil - } - } - return ResponseError(resp) -} - -func (r Client) CallWithForm(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, - param map[string][]string) (err error) { - - resp, err := r.DoRequestWithForm(ctx, method, reqUrl, headers, param) - if err != nil { - return err - } - return CallRet(ctx, ret, resp) -} - -func (r Client) CallWithJson(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, - param interface{}) (err error) { - - resp, err := r.DoRequestWithJson(ctx, method, reqUrl, headers, param) - if err != nil { - return err - } - return CallRet(ctx, ret, resp) -} - -func (r Client) CallWith(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader, - bodyLength int) (err error) { - - resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, body, bodyLength) - if err != nil { - return err - } - return CallRet(ctx, ret, resp) -} - -func (r Client) CallWith64(ctx Context, ret interface{}, method, reqUrl string, headers http.Header, body io.Reader, - bodyLength int64) (err error) { - - resp, err := r.DoRequestWith64(ctx, method, reqUrl, headers, body, bodyLength) - if err != nil { - return err - } - return CallRet(ctx, ret, resp) -} - -func (r Client) Call(ctx Context, ret interface{}, method, reqUrl string, headers http.Header) (err error) { - - resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, nil, 0) - if err != nil { - return err - } - return CallRet(ctx, ret, resp) -} - -func (r Client) CallChan(ctx Context, method, reqUrl string, headers http.Header) (chan listFilesRet2, error) { - - resp, err := r.DoRequestWith(ctx, method, reqUrl, headers, nil, 0) - if err != nil { - return nil, err - } - if resp.StatusCode/100 != 2 { - return nil, ResponseError(resp) - } - return CallRetChan(ctx, resp) -} - -// --------------------------------------------------------------------------- - -type requestCanceler interface { - CancelRequest(req *http.Request) -} - -type nestedObjectGetter interface { - NestedObject() interface{} -} - -func getRequestCanceler(tp http.RoundTripper) (rc requestCanceler, ok bool) { - - if rc, ok = tp.(requestCanceler); ok { - return - } - - p := interface{}(tp) - for { - getter, ok1 := p.(nestedObjectGetter) - if !ok1 { - return - } - p = getter.NestedObject() - if rc, ok = p.(requestCanceler); ok { - return - } - } -} - -// -------------------------------------------------------------------- diff --git a/vendor/github.com/qiniu/api.v7/storage/token.go b/vendor/github.com/qiniu/api.v7/storage/token.go deleted file mode 100644 index b9f8b55..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/token.go +++ /dev/null @@ -1,73 +0,0 @@ -package storage - -import ( - "encoding/base64" - "encoding/json" - "errors" - "strings" - "time" - - "github.com/qiniu/api.v7/auth/qbox" -) - -// PutPolicy 表示文件上传的上传策略 -type PutPolicy struct { - Scope string `json:"scope"` - Expires uint32 `json:"deadline"` // 截止时间(以秒为单位) - IsPrefixalScope int `json:"isPrefixalScope,omitempty"` - InsertOnly uint16 `json:"insertOnly,omitempty"` // 若非0, 即使Scope为 Bucket:Key 的形式也是insert only - DetectMime uint8 `json:"detectMime,omitempty"` // 若非0, 则服务端根据内容自动确定 
MimeType - FsizeLimit int64 `json:"fsizeLimit,omitempty"` - MimeLimit string `json:"mimeLimit,omitempty"` - SaveKey string `json:"saveKey,omitempty"` - CallbackFetchKey uint8 `json:"callbackFetchKey,omitempty"` - CallbackURL string `json:"callbackUrl,omitempty"` - CallbackHost string `json:"callbackHost,omitempty"` - CallbackBody string `json:"callbackBody,omitempty"` - CallbackBodyType string `json:"callbackBodyType,omitempty"` - ReturnURL string `json:"returnUrl,omitempty"` - ReturnBody string `json:"returnBody,omitempty"` - PersistentOps string `json:"persistentOps,omitempty"` - PersistentNotifyURL string `json:"persistentNotifyUrl,omitempty"` - PersistentPipeline string `json:"persistentPipeline,omitempty"` - EndUser string `json:"endUser,omitempty"` - DeleteAfterDays int `json:"deleteAfterDays,omitempty"` - FileType int `json:"fileType,omitempty"` -} - -// UploadToken 方法用来进行上传凭证的生成 -func (p *PutPolicy) UploadToken(mac *qbox.Mac) (token string) { - if p.Expires == 0 { - p.Expires = 3600 // 1 hour - } - p.Expires += uint32(time.Now().Unix()) - - putPolicyJSON, _ := json.Marshal(p) - token = mac.SignWithData(putPolicyJSON) - return -} - -func getAkBucketFromUploadToken(token string) (ak, bucket string, err error) { - items := strings.Split(token, ":") - if len(items) != 3 { - err = errors.New("invalid upload token, format error") - return - } - - ak = items[0] - policyBytes, dErr := base64.URLEncoding.DecodeString(items[2]) - if dErr != nil { - err = errors.New("invalid upload token, invalid put policy") - return - } - - putPolicy := PutPolicy{} - uErr := json.Unmarshal(policyBytes, &putPolicy) - if uErr != nil { - err = errors.New("invalid upload token, invalid put policy") - return - } - - bucket = strings.Split(putPolicy.Scope, ":")[0] - return -} diff --git a/vendor/github.com/qiniu/api.v7/storage/util.go b/vendor/github.com/qiniu/api.v7/storage/util.go deleted file mode 100644 index 6c8fb8c..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package storage - -import ( - "time" -) - -// ParsePutTime 提供了将PutTime转换为 time.Time 的功能 -func ParsePutTime(putTime int64) (t time.Time) { - t = time.Unix(0, putTime*100) - return -} - -// IsContextExpired 检查分片上传的ctx是否过期,提前一天让它过期 -// 因为我们认为如果断点继续上传的话,最长需要1天时间 -func IsContextExpired(blkPut BlkputRet) bool { - if blkPut.Ctx == "" { - return false - } - target := time.Unix(blkPut.ExpiredAt, 0).AddDate(0, 0, -1) - now := time.Now() - return now.After(target) -} diff --git a/vendor/github.com/qiniu/api.v7/storage/zone.go b/vendor/github.com/qiniu/api.v7/storage/zone.go deleted file mode 100644 index 4131c8f..0000000 --- a/vendor/github.com/qiniu/api.v7/storage/zone.go +++ /dev/null @@ -1,247 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "strings" - "sync" -) - -// Zone 为空间对应的机房属性,主要包括了上传,资源管理等操作的域名 -type Zone struct { - SrcUpHosts []string - CdnUpHosts []string - RsHost string - RsfHost string - ApiHost string - IovipHost string -} - -func (z *Zone) String() string { - str := "" - str += fmt.Sprintf("SrcUpHosts: %v\n", z.SrcUpHosts) - str += fmt.Sprintf("CdnUpHosts: %v\n", z.CdnUpHosts) - str += fmt.Sprintf("IovipHost: %s\n", z.IovipHost) - str += fmt.Sprintf("RsHost: %s\n", z.RsHost) - str += fmt.Sprintf("RsfHost: %s\n", z.RsfHost) - str += fmt.Sprintf("ApiHost: %s\n", z.ApiHost) - return str -} - -func (z *Zone) GetRsfHost(useHttps bool) string { - - scheme := "http://" - if useHttps { - scheme = "https://" - } - - return fmt.Sprintf("%s%s", scheme, z.RsfHost) -} - -func (z *Zone) 
GetIoHost(useHttps bool) string { - - scheme := "http://" - if useHttps { - scheme = "https://" - } - - return fmt.Sprintf("%s%s", scheme, z.IovipHost) -} - -func (z *Zone) GetRsHost(useHttps bool) string { - - scheme := "http://" - if useHttps { - scheme = "https://" - } - - return fmt.Sprintf("%s%s", scheme, z.RsHost) -} - -func (z *Zone) GetApiHost(useHttps bool) string { - - scheme := "http://" - if useHttps { - scheme = "https://" - } - - return fmt.Sprintf("%s%s", scheme, z.ApiHost) -} - -// ZoneHuadong 表示华东机房 -var ZoneHuadong = Zone{ - SrcUpHosts: []string{ - "up.qiniup.com", - "up-nb.qiniup.com", - "up-xs.qiniup.com", - }, - CdnUpHosts: []string{ - "upload.qiniup.com", - "upload-nb.qiniup.com", - "upload-xs.qiniup.com", - }, - RsHost: "rs.qbox.me", - RsfHost: "rsf.qbox.me", - ApiHost: "api.qiniu.com", - IovipHost: "iovip.qbox.me", -} - -// ZoneHuabei 表示华北机房 -var ZoneHuabei = Zone{ - SrcUpHosts: []string{ - "up-z1.qiniup.com", - }, - CdnUpHosts: []string{ - "upload-z1.qiniup.com", - }, - RsHost: "rs-z1.qbox.me", - RsfHost: "rsf-z1.qbox.me", - ApiHost: "api-z1.qiniu.com", - IovipHost: "iovip-z1.qbox.me", -} - -// ZoneHuanan 表示华南机房 -var ZoneHuanan = Zone{ - SrcUpHosts: []string{ - "up-z2.qiniup.com", - "up-gz.qiniup.com", - "up-fs.qiniup.com", - }, - CdnUpHosts: []string{ - "upload-z2.qiniup.com", - "upload-gz.qiniup.com", - "upload-fs.qiniup.com", - }, - RsHost: "rs-z2.qbox.me", - RsfHost: "rsf-z2.qbox.me", - ApiHost: "api-z2.qiniu.com", - IovipHost: "iovip-z2.qbox.me", -} - -// ZoneBeimei 表示北美机房 -var ZoneBeimei = Zone{ - SrcUpHosts: []string{ - "up-na0.qiniup.com", - }, - CdnUpHosts: []string{ - "upload-na0.qiniup.com", - }, - RsHost: "rs-na0.qbox.me", - RsfHost: "rsf-na0.qbox.me", - ApiHost: "api-na0.qiniu.com", - IovipHost: "iovip-na0.qbox.me", -} - -// ZoneXinjiapo 表示新加坡机房 -var ZoneXinjiapo = Zone{ - SrcUpHosts: []string{ - "up-as0.qiniup.com", - }, - CdnUpHosts: []string{ - "upload-as0.qiniup.com", - }, - RsHost: "rs-as0.qbox.me", - RsfHost: "rsf-as0.qbox.me", - ApiHost: "api-as0.qiniu.com", - IovipHost: "iovip-as0.qbox.me", -} - -// for programmers -var Zone_z0 = ZoneHuadong -var Zone_z1 = ZoneHuabei -var Zone_z2 = ZoneHuanan -var Zone_na0 = ZoneBeimei -var Zone_as0 = ZoneXinjiapo - -// UcHost 为查询空间相关域名的API服务地址 -const UcHost = "https://uc.qbox.me" - -// UcQueryRet 为查询请求的回复 -type UcQueryRet struct { - TTL int `json:"ttl"` - Io map[string]map[string][]string `json:"io"` - Up map[string]UcQueryUp `json:"up"` -} - -// UcQueryUp 为查询请求回复中的上传域名信息 -type UcQueryUp struct { - Main []string `json:"main,omitempty"` - Backup []string `json:"backup,omitempty"` - Info string `json:"info,omitempty"` -} - -var ( - zoneMutext sync.RWMutex - zoneCache = make(map[string]*Zone) -) - -// GetZone 用来根据ak和bucket来获取空间相关的机房信息 -func GetZone(ak, bucket string) (zone *Zone, err error) { - zoneID := fmt.Sprintf("%s:%s", ak, bucket) - //check from cache - zoneMutext.RLock() - if v, ok := zoneCache[zoneID]; ok { - zone = v - } - zoneMutext.RUnlock() - if zone != nil { - return - } - - //query from server - reqURL := fmt.Sprintf("%s/v2/query?ak=%s&bucket=%s", UcHost, ak, bucket) - var ret UcQueryRet - ctx := context.TODO() - qErr := DefaultClient.CallWithForm(ctx, &ret, "GET", reqURL, nil, nil) - if qErr != nil { - err = fmt.Errorf("query zone error, %s", qErr.Error()) - return - } - - ioHost := ret.Io["src"]["main"][0] - srcUpHosts := ret.Up["src"].Main - if ret.Up["src"].Backup != nil { - srcUpHosts = append(srcUpHosts, ret.Up["src"].Backup...) 
- } - cdnUpHosts := ret.Up["acc"].Main - if ret.Up["acc"].Backup != nil { - cdnUpHosts = append(cdnUpHosts, ret.Up["acc"].Backup...) - } - - zone = &Zone{ - SrcUpHosts: srcUpHosts, - CdnUpHosts: cdnUpHosts, - IovipHost: ioHost, - RsHost: DefaultRsHost, - RsfHost: DefaultRsfHost, - ApiHost: DefaultAPIHost, - } - - //set specific hosts if possible - setSpecificHosts(ioHost, zone) - - zoneMutext.Lock() - zoneCache[zoneID] = zone - zoneMutext.Unlock() - return -} - -func setSpecificHosts(ioHost string, zone *Zone) { - if strings.Contains(ioHost, "-z1") { - zone.RsHost = "rs-z1.qbox.me" - zone.RsfHost = "rsf-z1.qbox.me" - zone.ApiHost = "api-z1.qiniu.com" - } else if strings.Contains(ioHost, "-z2") { - zone.RsHost = "rs-z2.qbox.me" - zone.RsfHost = "rsf-z2.qbox.me" - zone.ApiHost = "api-z2.qiniu.com" - } else if strings.Contains(ioHost, "-na0") { - zone.RsHost = "rs-na0.qbox.me" - zone.RsfHost = "rsf-na0.qbox.me" - zone.ApiHost = "api-na0.qiniu.com" - } else if strings.Contains(ioHost, "-as0") { - zone.RsHost = "rs-as0.qbox.me" - zone.RsfHost = "rsf-as0.qbox.me" - zone.ApiHost = "api-as0.qiniu.com" - } -} diff --git a/vendor/github.com/qiniu/api.v7/test-env.sh b/vendor/github.com/qiniu/api.v7/test-env.sh deleted file mode 100644 index d412685..0000000 --- a/vendor/github.com/qiniu/api.v7/test-env.sh +++ /dev/null @@ -1,10 +0,0 @@ -DIR=$(cd ../; pwd) -export GOPATH=$DIR:$GOPATH -export QINIU_ACCESS_KEY=ak -export QINIU_SECRET_KEY=sk -export QINIU_TEST_BUCKET=gosdk -export QINIU_TEST_BUCKET_PRIVATE=gosdk.qiniudn.com -export QINIU_TEST_DOMAIN=gosdk.qiniudn.com -export QINIU_TEST_DOMAIN_PRIVATE=gosdk.qiniudn.com -export QINIU_TEST_PIPELINE=sdktest -export TRAVIS_BUILD_DIR=/Users/jemy/Downloads \ No newline at end of file diff --git a/vendor/github.com/qiniu/x/bytes.v7/README.md b/vendor/github.com/qiniu/x/bytes.v7/README.md deleted file mode 100644 index 028478b..0000000 --- a/vendor/github.com/qiniu/x/bytes.v7/README.md +++ /dev/null @@ -1,4 +0,0 @@ -qiniupkg.com/x/bytes.v7 -===== - -Extension module of golang bytes processing diff --git a/vendor/github.com/qiniu/x/bytes.v7/bytes.go b/vendor/github.com/qiniu/x/bytes.v7/bytes.go deleted file mode 100644 index a4a4f63..0000000 --- a/vendor/github.com/qiniu/x/bytes.v7/bytes.go +++ /dev/null @@ -1,177 +0,0 @@ -package bytes - -import ( - "io" - "syscall" -) - -// --------------------------------------------------- - -type Reader struct { - b []byte - off int -} - -func NewReader(val []byte) *Reader { - return &Reader{val, 0} -} - -func (r *Reader) Len() int { - if r.off >= len(r.b) { - return 0 - } - return len(r.b) - r.off -} - -func (r *Reader) Bytes() []byte { - return r.b[r.off:] -} - -func (r *Reader) SeekToBegin() (err error) { - r.off = 0 - return -} - -func (r *Reader) Seek(offset int64, whence int) (ret int64, err error) { - switch whence { - case 0: - case 1: - offset += int64(r.off) - case 2: - offset += int64(len(r.b)) - default: - err = syscall.EINVAL - return - } - if offset < 0 { - err = syscall.EINVAL - return - } - if offset >= int64(len(r.b)) { - r.off = len(r.b) - } else { - r.off = int(offset) - } - ret = int64(r.off) - return -} - -func (r *Reader) Read(val []byte) (n int, err error) { - n = copy(val, r.b[r.off:]) - if n == 0 && len(val) != 0 { - err = io.EOF - return - } - r.off += n - return -} - -func (r *Reader) Close() (err error) { - return -} - -// --------------------------------------------------- - -type Writer struct { - b []byte - n int -} - -func NewWriter(buff []byte) *Writer { - return &Writer{buff, 0} -} - -func (p 
*Writer) Write(val []byte) (n int, err error) { - n = copy(p.b[p.n:], val) - if n == 0 && len(val) > 0 { - err = io.EOF - return - } - p.n += n - return -} - -func (p *Writer) Len() int { - return p.n -} - -func (p *Writer) Bytes() []byte { - return p.b[:p.n] -} - -func (p *Writer) Reset() { - p.n = 0 -} - -// --------------------------------------------------- - -type Buffer struct { - b []byte -} - -func NewBuffer() *Buffer { - return new(Buffer) -} - -func (p *Buffer) ReadAt(buf []byte, off int64) (n int, err error) { - ioff := int(off) - if len(p.b) <= ioff { - return 0, io.EOF - } - n = copy(buf, p.b[ioff:]) - if n != len(buf) { - err = io.EOF - } - return -} - -func (p *Buffer) WriteAt(buf []byte, off int64) (n int, err error) { - ioff := int(off) - iend := ioff + len(buf) - if len(p.b) < iend { - if len(p.b) == ioff { - p.b = append(p.b, buf...) - return len(buf), nil - } - zero := make([]byte, iend-len(p.b)) - p.b = append(p.b, zero...) - } - copy(p.b[ioff:], buf) - return len(buf), nil -} - -func (p *Buffer) WriteStringAt(buf string, off int64) (n int, err error) { - ioff := int(off) - iend := ioff + len(buf) - if len(p.b) < iend { - if len(p.b) == ioff { - p.b = append(p.b, buf...) - return len(buf), nil - } - zero := make([]byte, iend-len(p.b)) - p.b = append(p.b, zero...) - } - copy(p.b[ioff:], buf) - return len(buf), nil -} - -func (p *Buffer) Truncate(fsize int64) (err error) { - size := int(fsize) - if len(p.b) < size { - zero := make([]byte, size-len(p.b)) - p.b = append(p.b, zero...) - } else { - p.b = p.b[:size] - } - return nil -} - -func (p *Buffer) Buffer() []byte { - return p.b -} - -func (p *Buffer) Len() int { - return len(p.b) -} - -// --------------------------------------------------- diff --git a/vendor/github.com/qiniu/x/bytes.v7/doc.go b/vendor/github.com/qiniu/x/bytes.v7/doc.go deleted file mode 100644 index cdec535..0000000 --- a/vendor/github.com/qiniu/x/bytes.v7/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -包 qiniupkg.com/x/bytes.v7 提供了 byte slice 相关的功能扩展 - -NewReader 创建一个 byte slice 的只读流: - - var slice []byte - ... - r := bytes.NewReader(slice) - ... - r.Seek(0, 0) // r.SeekToBegin() - ... - -和标准库的 bytes.NewReader 不同的是,这里的 Reader 支持 Seek。 - -NewWriter 创建一个有上限容量的写流: - - slice := make([]byte, 1024) - w := bytes.NewWriter(slice) - ... - writtenData := w.Bytes() - -如果我们向 w 里面写入超过 1024 字节的数据,那么多余的数据会被丢弃。 - -NewBuffer 创建一个可随机读写的内存文件,支持 ReadAt/WriteAt 方法,而不是 Read/Write: - - b := bytes.NewBuffer() - b.Truncate(100) - b.WriteAt([]byte("hello"), 100) - slice := make([]byte, 105) - n, err := b.ReadAt(slice, 0) - ... -*/ -package bytes - diff --git a/vendor/github.com/qiniu/x/bytes.v7/replace.go b/vendor/github.com/qiniu/x/bytes.v7/replace.go deleted file mode 100644 index a62a161..0000000 --- a/vendor/github.com/qiniu/x/bytes.v7/replace.go +++ /dev/null @@ -1,54 +0,0 @@ -package bytes - -import ( - "bytes" -) - -// --------------------------------------------------- - -func ReplaceAt(b []byte, off, nsrc int, dest []byte) []byte { - - ndelta := len(dest) - nsrc - if ndelta < 0 { - left := b[off+nsrc:] - off += copy(b[off:], dest) - off += copy(b[off:], left) - return b[:off] - } - - if ndelta > 0 { - b = append(b, dest[:ndelta]...) 
- copy(b[off+len(dest):], b[off+nsrc:]) - copy(b[off:], dest) - } else { - copy(b[off:], dest) - } - return b -} - -func ReplaceOne(b []byte, from int, src, dest []byte) ([]byte, int) { - - pos := bytes.Index(b[from:], src) - if pos < 0 { - return b, -1 - } - - from += pos - return ReplaceAt(b, from, len(src), dest), from + len(dest) -} - -func Replace(b []byte, src, dest []byte, n int) []byte { - - from := 0 - for n != 0 { - b, from = ReplaceOne(b, from, src, dest) - if from < 0 { - break - } - n-- - } - return b -} - -// --------------------------------------------------- - diff --git a/vendor/github.com/qiniu/x/bytes.v7/seekable/seekable.go b/vendor/github.com/qiniu/x/bytes.v7/seekable/seekable.go deleted file mode 100644 index 3d718e1..0000000 --- a/vendor/github.com/qiniu/x/bytes.v7/seekable/seekable.go +++ /dev/null @@ -1,63 +0,0 @@ -// This package provide a method to read and replace http.Request's body. -package seekable - -import ( - "errors" - "io" - "io/ioutil" - "net/http" - - "qiniupkg.com/x/bytes.v7" -) - -// --------------------------------------------------- - -type Seekabler interface { - Bytes() []byte - Read(val []byte) (n int, err error) - SeekToBegin() error -} - -type SeekableCloser interface { - Seekabler - io.Closer -} - -// --------------------------------------------------- - -type readCloser struct { - Seekabler - io.Closer -} - -var ErrNoBody = errors.New("no body") - -func New(req *http.Request) (r SeekableCloser, err error) { - if req.Body == nil { - return nil, ErrNoBody - } - var ok bool - if r, ok = req.Body.(SeekableCloser); ok { - return - } - b, err2 := ReadAll(req) - if err2 != nil { - return nil, err2 - } - r = bytes.NewReader(b) - req.Body = readCloser{r, req.Body} - return -} - -func ReadAll(req *http.Request) (b []byte, err error) { - if req.ContentLength > 0 { - b = make([]byte, int(req.ContentLength)) - _, err = io.ReadFull(req.Body, b) - return - } else if req.ContentLength == 0 { - return nil, ErrNoBody - } - return ioutil.ReadAll(req.Body) -} - -// --------------------------------------------------- diff --git a/vendor/github.com/qiniu/x/reqid.v7/reqid.go b/vendor/github.com/qiniu/x/reqid.v7/reqid.go deleted file mode 100644 index fc1528b..0000000 --- a/vendor/github.com/qiniu/x/reqid.v7/reqid.go +++ /dev/null @@ -1,52 +0,0 @@ -package reqid - -import ( - "encoding/binary" - "encoding/base64" - "net/http" - "time" - - . 
"golang.org/x/net/context" -) - -// -------------------------------------------------------------------- - -var pid = uint32(time.Now().UnixNano() % 4294967291) - -func genReqId() string { - var b [12]byte - binary.LittleEndian.PutUint32(b[:], pid) - binary.LittleEndian.PutUint64(b[4:], uint64(time.Now().UnixNano())) - return base64.URLEncoding.EncodeToString(b[:]) -} - -// -------------------------------------------------------------------- - -type key int // key is unexported and used for Context - -const ( - reqidKey key = 0 -) - -func NewContext(ctx Context, reqid string) Context { - return WithValue(ctx, reqidKey, reqid) -} - -func NewContextWith(ctx Context, w http.ResponseWriter, req *http.Request) Context { - reqid := req.Header.Get("X-Reqid") - if reqid == "" { - reqid = genReqId() - req.Header.Set("X-Reqid", reqid) - } - h := w.Header() - h.Set("X-Reqid", reqid) - return WithValue(ctx, reqidKey, reqid) -} - -func FromContext(ctx Context) (reqid string, ok bool) { - reqid, ok = ctx.Value(reqidKey).(string) - return -} - -// -------------------------------------------------------------------- - diff --git a/vendor/github.com/qiniu/x/xlog.v7/xlog.go b/vendor/github.com/qiniu/x/xlog.v7/xlog.go deleted file mode 100644 index 2f627da..0000000 --- a/vendor/github.com/qiniu/x/xlog.v7/xlog.go +++ /dev/null @@ -1,211 +0,0 @@ -package xlog - -import ( - "fmt" - "io" - "os" - "runtime" - - "qiniupkg.com/x/log.v7" - "qiniupkg.com/x/reqid.v7" - - . "golang.org/x/net/context" -) - -const ( - Ldate = log.Ldate - Ltime = log.Ltime - Lmicroseconds = log.Lmicroseconds - Llongfile = log.Llongfile - Lshortfile = log.Lshortfile - Lmodule = log.Lmodule - Llevel = log.Llevel - LstdFlags = log.LstdFlags - Ldefault = log.Ldefault -) - -const ( - Ldebug = log.Ldebug - Linfo = log.Linfo - Lwarn = log.Lwarn - Lerror = log.Lerror - Lpanic = log.Lpanic - Lfatal = log.Lfatal -) - -// ============================================================================ -// type *Logger - -type Logger struct { - ReqId string -} - -func New(reqId string) *Logger { - - return &Logger{reqId} -} - -func NewWith(ctx Context) *Logger { - - reqId, ok := reqid.FromContext(ctx) - if !ok { - log.Debug("xlog.New: reqid isn't find in context") - } - return &Logger{reqId} -} - -func (xlog *Logger) Spawn(child string) *Logger { - - return &Logger{xlog.ReqId + "." + child} -} - -// ============================================================================ - -// Print calls Output to print to the standard Logger. -// Arguments are handled in the manner of fmt.Print. -func (xlog *Logger) Print(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprint(v...)) -} - -// Printf calls Output to print to the standard Logger. -// Arguments are handled in the manner of fmt.Printf. -func (xlog *Logger) Printf(format string, v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintf(format, v...)) -} - -// Println calls Output to print to the standard Logger. -// Arguments are handled in the manner of fmt.Println. 
-func (xlog *Logger) Println(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintln(v...)) -} - -// ----------------------------------------- - -func (xlog *Logger) Debugf(format string, v ...interface{}) { - if log.Ldebug < log.Std.Level { - return - } - log.Std.Output(xlog.ReqId, log.Ldebug, 2, fmt.Sprintf(format, v...)) -} - -func (xlog *Logger) Debug(v ...interface{}) { - if log.Ldebug < log.Std.Level { - return - } - log.Std.Output(xlog.ReqId, log.Ldebug, 2, fmt.Sprintln(v...)) -} - -// ----------------------------------------- - -func (xlog *Logger) Infof(format string, v ...interface{}) { - if log.Linfo < log.Std.Level { - return - } - log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintf(format, v...)) -} - -func (xlog *Logger) Info(v ...interface{}) { - if log.Linfo < log.Std.Level { - return - } - log.Std.Output(xlog.ReqId, log.Linfo, 2, fmt.Sprintln(v...)) -} - -// ----------------------------------------- - -func (xlog *Logger) Warnf(format string, v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lwarn, 2, fmt.Sprintf(format, v...)) -} - -func (xlog *Logger) Warn(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lwarn, 2, fmt.Sprintln(v...)) -} - -// ----------------------------------------- - -func (xlog *Logger) Errorf(format string, v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lerror, 2, fmt.Sprintf(format, v...)) -} - -func (xlog *Logger) Error(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lerror, 2, fmt.Sprintln(v...)) -} - -// ----------------------------------------- - -// Fatal is equivalent to Print() followed by a call to os.Exit(1). -func (xlog *Logger) Fatal(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprint(v...)) - os.Exit(1) -} - -// Fatalf is equivalent to Printf() followed by a call to os.Exit(1). -func (xlog *Logger) Fatalf(format string, v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprintf(format, v...)) - os.Exit(1) -} - -// Fatalln is equivalent to Println() followed by a call to os.Exit(1). -func (xlog *Logger) Fatalln(v ...interface{}) { - log.Std.Output(xlog.ReqId, log.Lfatal, 2, fmt.Sprintln(v...)) - os.Exit(1) -} - -// ----------------------------------------- - -// Panic is equivalent to Print() followed by a call to panic(). -func (xlog *Logger) Panic(v ...interface{}) { - s := fmt.Sprint(v...) - log.Std.Output(xlog.ReqId, log.Lpanic, 2, s) - panic(s) -} - -// Panicf is equivalent to Printf() followed by a call to panic(). -func (xlog *Logger) Panicf(format string, v ...interface{}) { - s := fmt.Sprintf(format, v...) - log.Std.Output(xlog.ReqId, log.Lpanic, 2, s) - panic(s) -} - -// Panicln is equivalent to Println() followed by a call to panic(). -func (xlog *Logger) Panicln(v ...interface{}) { - s := fmt.Sprintln(v...) - log.Std.Output(xlog.ReqId, log.Lpanic, 2, s) - panic(s) -} - -func (xlog *Logger) Stack(v ...interface{}) { - s := fmt.Sprint(v...) - s += "\n" - buf := make([]byte, 1024*1024) - n := runtime.Stack(buf, true) - s += string(buf[:n]) - s += "\n" - log.Std.Output(xlog.ReqId, log.Lerror, 2, s) -} - -func (xlog *Logger) SingleStack(v ...interface{}) { - s := fmt.Sprint(v...) 
- s += "\n" - buf := make([]byte, 1024*1024) - n := runtime.Stack(buf, false) - s += string(buf[:n]) - s += "\n" - log.Std.Output(xlog.ReqId, log.Lerror, 2, s) -} - -// ============================================================================ - -func SetOutput(w io.Writer) { - log.SetOutput(w) -} - -func SetFlags(flag int) { - log.SetFlags(flag) -} - -func SetOutputLevel(lvl int) { - log.SetOutputLevel(lvl) -} - -// ============================================================================ diff --git a/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md b/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md deleted file mode 100755 index 8262386..0000000 --- a/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md +++ /dev/null @@ -1,12 +0,0 @@ -# Contributing - -In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted. - -Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines: - -- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request. -- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license. -- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set. -- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out. - - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc... - - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc... 
diff --git a/vendor/github.com/smartystreets/assertions/LICENSE.md b/vendor/github.com/smartystreets/assertions/LICENSE.md deleted file mode 100755 index 13f3f6e..0000000 --- a/vendor/github.com/smartystreets/assertions/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2016 SmartyStreets, LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -NOTE: Various optional and subordinate components carry their own licensing -requirements and restrictions. Use of those components is subject to the terms -and conditions outlined the respective license of each component. diff --git a/vendor/github.com/smartystreets/assertions/README.md b/vendor/github.com/smartystreets/assertions/README.md deleted file mode 100755 index fb21112..0000000 --- a/vendor/github.com/smartystreets/assertions/README.md +++ /dev/null @@ -1,575 +0,0 @@ -# assertions --- - import "github.com/smartystreets/assertions" - -Package assertions contains the implementations for all assertions which are -referenced in goconvey's `convey` package -(github.com/smartystreets/goconvey/convey) and gunit -(github.com/smartystreets/gunit) for use with the So(...) method. They can also -be used in traditional Go test functions and even in applications. - -Many of the assertions lean heavily on work done by Aaron Jacobs in his -excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The -ShouldResemble assertion leans heavily on work done by Daniel Jacques in his -very helpful go-render library. (https://github.com/luci/go-render) - -## Usage - -#### func GoConveyMode - -```go -func GoConveyMode(yes bool) -``` -GoConveyMode provides control over JSON serialization of failures. When using -the assertions in this package from the convey package JSON results are very -helpful and can be rendered in a DIFF view. In that case, this function will be -called with a true value to enable the JSON serialization. By default, the -assertions in this package will not serializer a JSON result, making standalone -ussage more convenient. - -#### func ShouldAlmostEqual - -```go -func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string -``` -ShouldAlmostEqual makes sure that two parameters are close enough to being -equal. The acceptable delta may be specified with a third argument, or a very -small default delta will be used. - -#### func ShouldBeBetween - -```go -func ShouldBeBetween(actual interface{}, expected ...interface{}) string -``` -ShouldBeBetween receives exactly three parameters: an actual value, a lower -bound, and an upper bound. 
It ensures that the actual value is between both -bounds (but not equal to either of them). - -#### func ShouldBeBetweenOrEqual - -```go -func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string -``` -ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a -lower bound, and an upper bound. It ensures that the actual value is between -both bounds or equal to one of them. - -#### func ShouldBeBlank - -```go -func ShouldBeBlank(actual interface{}, expected ...interface{}) string -``` -ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal -to "". - -#### func ShouldBeChronological - -```go -func ShouldBeChronological(actual interface{}, expected ...interface{}) string -``` -ShouldBeChronological receives a []time.Time slice and asserts that they are in -chronological order starting with the first time.Time as the earliest. - -#### func ShouldBeEmpty - -```go -func ShouldBeEmpty(actual interface{}, expected ...interface{}) string -``` -ShouldBeEmpty receives a single parameter (actual) and determines whether or not -calling len(actual) would return `0`. It obeys the rules specified by the len -function for determining length: http://golang.org/pkg/builtin/#len - -#### func ShouldBeFalse - -```go -func ShouldBeFalse(actual interface{}, expected ...interface{}) string -``` -ShouldBeFalse receives a single parameter and ensures that it is false. - -#### func ShouldBeGreaterThan - -```go -func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string -``` -ShouldBeGreaterThan receives exactly two parameters and ensures that the first -is greater than the second. - -#### func ShouldBeGreaterThanOrEqualTo - -```go -func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string -``` -ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that -the first is greater than or equal to the second. - -#### func ShouldBeIn - -```go -func ShouldBeIn(actual interface{}, expected ...interface{}) string -``` -ShouldBeIn receives at least 2 parameters. The first is a proposed member of the -collection that is passed in either as the second parameter, or of the -collection that is comprised of all the remaining parameters. This assertion -ensures that the proposed member is in the collection (using ShouldEqual). - -#### func ShouldBeLessThan - -```go -func ShouldBeLessThan(actual interface{}, expected ...interface{}) string -``` -ShouldBeLessThan receives exactly two parameters and ensures that the first is -less than the second. - -#### func ShouldBeLessThanOrEqualTo - -```go -func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string -``` -ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the first is -less than or equal to the second. - -#### func ShouldBeNil - -```go -func ShouldBeNil(actual interface{}, expected ...interface{}) string -``` -ShouldBeNil receives a single parameter and ensures that it is nil. - -#### func ShouldBeTrue - -```go -func ShouldBeTrue(actual interface{}, expected ...interface{}) string -``` -ShouldBeTrue receives a single parameter and ensures that it is true. - -#### func ShouldBeZeroValue - -```go -func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string -``` -ShouldBeZeroValue receives a single parameter and ensures that it is the Go -equivalent of the default value, or "zero" value.
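Editor's note: the value assertions above all follow the same convention (empty string on success, a failure message otherwise), so a small usage sketch may help. It assumes the vendored github.com/smartystreets/assertions package exactly as documented in this README; the standalone So helper used here is described further below, and the main scaffolding is only illustrative.

```go
package main

import (
	"log"

	"github.com/smartystreets/assertions"
)

func main() {
	// Each assertion returns "" on success and a failure message otherwise;
	// So wraps that convention into an (ok, message) pair.
	if ok, msg := assertions.So(3.14159, assertions.ShouldAlmostEqual, 3.1416, 0.001); !ok {
		log.Println(msg)
	}
	// ShouldBeBetween takes a lower and an upper bound.
	if ok, msg := assertions.So(42, assertions.ShouldBeBetween, 40, 45); !ok {
		log.Println(msg)
	}
	// ShouldBeEmpty follows the builtin len rules referenced above.
	if ok, msg := assertions.So([]int{}, assertions.ShouldBeEmpty); !ok {
		log.Println(msg)
	}
}
```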
- -#### func ShouldContain - -```go -func ShouldContain(actual interface{}, expected ...interface{}) string -``` -ShouldContain receives exactly two parameters. The first is a slice and the -second is a proposed member. Membership is determined using ShouldEqual. - -#### func ShouldContainKey - -```go -func ShouldContainKey(actual interface{}, expected ...interface{}) string -``` -ShouldContainKey receives exactly two parameters. The first is a map and the -second is a proposed key. Keys are compared with a simple '=='. - -#### func ShouldContainSubstring - -```go -func ShouldContainSubstring(actual interface{}, expected ...interface{}) string -``` -ShouldContainSubstring receives exactly 2 string parameters and ensures that the -first contains the second as a substring. - -#### func ShouldEndWith - -```go -func ShouldEndWith(actual interface{}, expected ...interface{}) string -``` -ShouldEndWith receives exactly 2 string parameters and ensures that the first -ends with the second. - -#### func ShouldEqual - -```go -func ShouldEqual(actual interface{}, expected ...interface{}) string -``` -ShouldEqual receives exactly two parameters and does an equality check. - -#### func ShouldEqualTrimSpace - -```go -func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string -``` -ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the -first is equal to the second after removing all leading and trailing whitespace -using strings.TrimSpace(first). - -#### func ShouldEqualWithout - -```go -func ShouldEqualWithout(actual interface{}, expected ...interface{}) string -``` -ShouldEqualWithout receives exactly 3 string parameters and ensures that the -first is equal to the second after removing all instances of the third from the -first using strings.Replace(first, third, "", -1). - -#### func ShouldHappenAfter - -```go -func ShouldHappenAfter(actual interface{}, expected ...interface{}) string -``` -ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the -first happens after the second. - -#### func ShouldHappenBefore - -```go -func ShouldHappenBefore(actual interface{}, expected ...interface{}) string -``` -ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the -first happens before the second. - -#### func ShouldHappenBetween - -```go -func ShouldHappenBetween(actual interface{}, expected ...interface{}) string -``` -ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the -first happens between (not on) the second and third. - -#### func ShouldHappenOnOrAfter - -```go -func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string -``` -ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that -the first happens on or after the second. - -#### func ShouldHappenOnOrBefore - -```go -func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string -``` -ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that -the first happens on or before the second. - -#### func ShouldHappenOnOrBetween - -```go -func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string -``` -ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that -the first happens between or on the second and third. 
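A similar hedged sketch for the string and time assertions documented above, again via the standalone So function described later in this README; the concrete times and strings are made up for illustration.

```go
package main

import (
	"log"
	"time"

	"github.com/smartystreets/assertions"
)

func main() {
	start := time.Now()
	middle := start.Add(1 * time.Second)
	end := start.Add(2 * time.Second)

	// Time assertions take time.Time arguments in the order documented above.
	if ok, msg := assertions.So(middle, assertions.ShouldHappenBetween, start, end); !ok {
		log.Println(msg)
	}
	if ok, msg := assertions.So(start, assertions.ShouldHappenBefore, end); !ok {
		log.Println(msg)
	}

	// String assertions compare after the documented transformations.
	if ok, msg := assertions.So("  hello  ", assertions.ShouldEqualTrimSpace, "hello"); !ok {
		log.Println(msg)
	}
	if ok, msg := assertions.So("filename.go", assertions.ShouldEndWith, ".go"); !ok {
		log.Println(msg)
	}
}
```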
- -#### func ShouldHappenWithin - -```go -func ShouldHappenWithin(actual interface{}, expected ...interface{}) string -``` -ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 -arguments) and asserts that the first time.Time happens within or on the -duration specified relative to the other time.Time. - -#### func ShouldHaveLength - -```go -func ShouldHaveLength(actual interface{}, expected ...interface{}) string -``` -ShouldHaveLength receives 2 parameters. The first is a collection to check the -length of, the second being the expected length. It obeys the rules specified by -the len function for determining length: http://golang.org/pkg/builtin/#len - -#### func ShouldHaveSameTypeAs - -```go -func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string -``` -ShouldHaveSameTypeAs receives exactly two parameters and compares their -underlying types for equality. - -#### func ShouldImplement - -```go -func ShouldImplement(actual interface{}, expectedList ...interface{}) string -``` -ShouldImplement receives exactly two parameters and ensures that the first -implements the interface type of the second. - -#### func ShouldNotAlmostEqual - -```go -func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string -``` -ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual - -#### func ShouldNotBeBetween - -```go -func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeBetween receives exactly three parameters: an actual value, a lower -bound, and an upper bound. It ensures that the actual value is NOT between both -bounds. - -#### func ShouldNotBeBetweenOrEqual - -```go -func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a -lower bound, and an upper bound. It ensures that the actual value is not -between the bounds nor equal to either of them. - -#### func ShouldNotBeBlank - -```go -func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is not -equal to "". - -#### func ShouldNotBeEmpty - -```go -func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeEmpty receives a single parameter (actual) and determines whether or -not calling len(actual) would return a value greater than zero. It obeys the -rules specified by the `len` function for determining length: -http://golang.org/pkg/builtin/#len - -#### func ShouldNotBeIn - -```go -func ShouldNotBeIn(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of -the collection that is passed in either as the second parameter, or of the -collection that is comprised of all the remaining parameters. This assertion -ensures that the proposed member is NOT in the collection (using ShouldEqual). - -#### func ShouldNotBeNil - -```go -func ShouldNotBeNil(actual interface{}, expected ...interface{}) string -``` -ShouldNotBeNil receives a single parameter and ensures that it is not nil. - -#### func ShouldNotContain - -```go -func ShouldNotContain(actual interface{}, expected ...interface{}) string -``` -ShouldNotContain receives exactly two parameters. The first is a slice and the -second is a proposed member. Membership is determined using ShouldEqual.
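The collection assertions above obey the builtin len rules and use ShouldEqual for membership checks. A brief illustrative sketch, assuming the vendored package as documented (the slice contents are arbitrary):

```go
package main

import (
	"log"

	"github.com/smartystreets/assertions"
)

func main() {
	langs := []string{"go", "rust", "python"}

	// Length and emptiness follow the builtin len rules referenced above.
	if ok, msg := assertions.So(langs, assertions.ShouldHaveLength, 3); !ok {
		log.Println(msg)
	}
	if ok, msg := assertions.So(langs, assertions.ShouldNotBeEmpty); !ok {
		log.Println(msg)
	}

	// Membership is checked with ShouldEqual under the hood, as documented.
	if ok, msg := assertions.So(langs, assertions.ShouldNotContain, "java"); !ok {
		log.Println(msg)
	}
	if ok, msg := assertions.So("java", assertions.ShouldNotBeIn, langs); !ok {
		log.Println(msg)
	}
}
```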
- -#### func ShouldNotContainKey - -```go -func ShouldNotContainKey(actual interface{}, expected ...interface{}) string -``` -ShouldNotContainKey receives exactly two parameters. The first is a map and the -second is a proposed absent key. Keys are compared with a simple '=='. - -#### func ShouldNotContainSubstring - -```go -func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string -``` -ShouldNotContainSubstring receives exactly 2 string parameters and ensures that -the first does NOT contain the second as a substring. - -#### func ShouldNotEndWith - -```go -func ShouldNotEndWith(actual interface{}, expected ...interface{}) string -``` -ShouldNotEndWith receives exactly 2 string parameters and ensures that the first -does not end with the second. - -#### func ShouldNotEqual - -```go -func ShouldNotEqual(actual interface{}, expected ...interface{}) string -``` -ShouldNotEqual receives exactly two parameters and does an inequality check. - -#### func ShouldNotHappenOnOrBetween - -```go -func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string -``` -ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts -that the first does NOT happen between or on the second or third. - -#### func ShouldNotHappenWithin - -```go -func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string -``` -ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 -arguments) and asserts that the first time.Time does NOT happen within or on the -duration specified relative to the other time.Time. - -#### func ShouldNotHaveSameTypeAs - -```go -func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string -``` -ShouldNotHaveSameTypeAs receives exactly two parameters and compares their -underlying types for inequality. - -#### func ShouldNotImplement - -```go -func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string -``` -ShouldNotImplement receives exactly two parameters and ensures that the first -does NOT implement the interface type of the second. - -#### func ShouldNotPanic - -```go -func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) -``` -ShouldNotPanic receives a void, niladic function and expects to execute the -function without any panic. - -#### func ShouldNotPanicWith - -```go -func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) -``` -ShouldNotPanicWith receives a void, niladic function and expects to recover a -panic whose content differs from the second argument. - -#### func ShouldNotPointTo - -```go -func ShouldNotPointTo(actual interface{}, expected ...interface{}) string -``` -ShouldNotPointTo receives exactly two parameters and checks to see that they -point to different addresses. - -#### func ShouldNotResemble - -```go -func ShouldNotResemble(actual interface{}, expected ...interface{}) string -``` -ShouldNotResemble receives exactly two parameters and does an inverse deep equal -check (see reflect.DeepEqual) - -#### func ShouldNotStartWith - -```go -func ShouldNotStartWith(actual interface{}, expected ...interface{}) string -``` -ShouldNotStartWith receives exactly 2 string parameters and ensures that the -first does not start with the second. - -#### func ShouldPanic - -```go -func ShouldPanic(actual interface{}, expected ...interface{}) (message string) -``` -ShouldPanic receives a void, niladic function and expects to recover a panic.
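The panic assertions take a void, niladic function. A minimal sketch, assuming the package as documented here (ShouldPanicWith is described just below):

```go
package main

import (
	"log"

	"github.com/smartystreets/assertions"
)

func main() {
	// The panic assertions receive the function itself, not its result.
	boom := func() { panic("boom") }
	safe := func() {}

	if ok, msg := assertions.So(boom, assertions.ShouldPanic); !ok {
		log.Println(msg)
	}
	if ok, msg := assertions.So(safe, assertions.ShouldNotPanic); !ok {
		log.Println(msg)
	}
	// ShouldPanicWith also checks the recovered value.
	if ok, msg := assertions.So(boom, assertions.ShouldPanicWith, "boom"); !ok {
		log.Println(msg)
	}
}
```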
- -#### func ShouldPanicWith - -```go -func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) -``` -ShouldPanicWith receives a void, niladic function and expects to recover a panic -with the second argument as the content. - -#### func ShouldPointTo - -```go -func ShouldPointTo(actual interface{}, expected ...interface{}) string -``` -ShouldPointTo receives exactly two parameters and checks to see that they point -to the same address. - -#### func ShouldResemble - -```go -func ShouldResemble(actual interface{}, expected ...interface{}) string -``` -ShouldResemble receives exactly two parameters and does a deep equal check (see -reflect.DeepEqual) - -#### func ShouldStartWith - -```go -func ShouldStartWith(actual interface{}, expected ...interface{}) string -``` -ShouldStartWith receives exactly 2 string parameters and ensures that the first -starts with the second. - -#### func So - -```go -func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) -``` -So is a convenience function (as opposed to an inconvenience function?) for -running assertions on arbitrary arguments in any context, be it for testing or -even application logging. It allows you to perform assertion-like behavior (and -get nicely formatted messages detailing discrepancies) but without the program -blowing up or panicking. All that is required is to import this package and call -`So` with one of the assertions exported by this package as the second -parameter. The first return parameter is a boolean indicating if the assertion -was true. The second return parameter is the well-formatted message showing why -an assertion was incorrect, or blank if the assertion was correct. - -Example: - - if ok, message := So(x, ShouldBeGreaterThan, y); !ok { - log.Println(message) - } - -#### type Assertion - -```go -type Assertion struct { -} -``` - - -#### func New - -```go -func New(t testingT) *Assertion -``` -New swallows the *testing.T struct and prints failed assertions using t.Error. -Example: assertions.New(t).So(1, should.Equal, 1) - -#### func (*Assertion) Failed - -```go -func (this *Assertion) Failed() bool -``` -Failed reports whether any calls to So (on this Assertion instance) have failed. - -#### func (*Assertion) So - -```go -func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool -``` -So calls the standalone So function and additionally, calls t.Error in failure -scenarios. - -#### type FailureView - -```go -type FailureView struct { - Message string `json:"Message"` - Expected string `json:"Expected"` - Actual string `json:"Actual"` -} -``` - -This struct is also declared in -github.com/smartystreets/goconvey/convey/reporting. The json struct tags should -be equal in both declarations. 
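Putting the Assertion type above to work in an ordinary test might look like the following sketch: New swallows the *testing.T, So reports failures through t.Error, and Failed tells you whether any assertion on this instance has failed so far. The package and test names are hypothetical.

```go
package yourpkg_test

import (
	"testing"

	"github.com/smartystreets/assertions"
)

func TestSomething(t *testing.T) {
	assert := assertions.New(t)

	// Failures are reported via t.Error with file and line information.
	assert.So(1+1, assertions.ShouldEqual, 2)
	assert.So("golang", assertions.ShouldStartWith, "go")

	if assert.Failed() {
		t.Log("at least one assertion above failed")
	}
}
```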
- -#### type Serializer - -```go -type Serializer interface { - // contains filtered or unexported methods -} -``` diff --git a/vendor/github.com/smartystreets/assertions/assertions.goconvey b/vendor/github.com/smartystreets/assertions/assertions.goconvey deleted file mode 100755 index bad2325..0000000 --- a/vendor/github.com/smartystreets/assertions/assertions.goconvey +++ /dev/null @@ -1,3 +0,0 @@ -#ignore --timeout=1s --coverpkg=github.com/smartystreets/assertions,github.com/smartystreets/assertions/internal/oglematchers \ No newline at end of file diff --git a/vendor/github.com/smartystreets/assertions/collections.go b/vendor/github.com/smartystreets/assertions/collections.go deleted file mode 100755 index 1d14210..0000000 --- a/vendor/github.com/smartystreets/assertions/collections.go +++ /dev/null @@ -1,244 +0,0 @@ -package assertions - -import ( - "fmt" - "reflect" - - "github.com/smartystreets/assertions/internal/oglematchers" -) - -// ShouldContain receives exactly two parameters. The first is a slice and the -// second is a proposed member. Membership is determined using ShouldEqual. -func ShouldContain(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { - typeName := reflect.TypeOf(actual) - - if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { - return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) - } - return fmt.Sprintf(shouldHaveContained, typeName, expected[0]) - } - return success -} - -// ShouldNotContain receives exactly two parameters. The first is a slice and the -// second is a proposed member. Membership is determinied using ShouldEqual. -func ShouldNotContain(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - typeName := reflect.TypeOf(actual) - - if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { - if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { - return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) - } - return success - } - return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0]) -} - -// ShouldContainKey receives exactly two parameters. The first is a map and the -// second is a proposed key. Keys are compared with a simple '=='. -func ShouldContainKey(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - keys, isMap := mapKeys(actual) - if !isMap { - return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) - } - - if !keyFound(keys, expected[0]) { - return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected) - } - - return "" -} - -// ShouldNotContainKey receives exactly two parameters. The first is a map and the -// second is a proposed absent key. Keys are compared with a simple '=='. 
-func ShouldNotContainKey(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - keys, isMap := mapKeys(actual) - if !isMap { - return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) - } - - if keyFound(keys, expected[0]) { - return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected) - } - - return "" -} - -func mapKeys(m interface{}) ([]reflect.Value, bool) { - value := reflect.ValueOf(m) - if value.Kind() != reflect.Map { - return nil, false - } - return value.MapKeys(), true -} -func keyFound(keys []reflect.Value, expectedKey interface{}) bool { - found := false - for _, key := range keys { - if key.Interface() == expectedKey { - found = true - } - } - return found -} - -// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection -// that is passed in either as the second parameter, or of the collection that is comprised -// of all the remaining parameters. This assertion ensures that the proposed member is in -// the collection (using ShouldEqual). -func ShouldBeIn(actual interface{}, expected ...interface{}) string { - if fail := atLeast(1, expected); fail != success { - return fail - } - - if len(expected) == 1 { - return shouldBeIn(actual, expected[0]) - } - return shouldBeIn(actual, expected) -} -func shouldBeIn(actual interface{}, expected interface{}) string { - if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil { - return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected)) - } - return success -} - -// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection -// that is passed in either as the second parameter, or of the collection that is comprised -// of all the remaining parameters. This assertion ensures that the proposed member is NOT in -// the collection (using ShouldEqual). -func ShouldNotBeIn(actual interface{}, expected ...interface{}) string { - if fail := atLeast(1, expected); fail != success { - return fail - } - - if len(expected) == 1 { - return shouldNotBeIn(actual, expected[0]) - } - return shouldNotBeIn(actual, expected) -} -func shouldNotBeIn(actual interface{}, expected interface{}) string { - if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil { - return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected)) - } - return success -} - -// ShouldBeEmpty receives a single parameter (actual) and determines whether or not -// calling len(actual) would return `0`. 
It obeys the rules specified by the len -// function for determining length: http://golang.org/pkg/builtin/#len -func ShouldBeEmpty(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - - if actual == nil { - return success - } - - value := reflect.ValueOf(actual) - switch value.Kind() { - case reflect.Slice: - if value.Len() == 0 { - return success - } - case reflect.Chan: - if value.Len() == 0 { - return success - } - case reflect.Map: - if value.Len() == 0 { - return success - } - case reflect.String: - if value.Len() == 0 { - return success - } - case reflect.Ptr: - elem := value.Elem() - kind := elem.Kind() - if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 { - return success - } - } - - return fmt.Sprintf(shouldHaveBeenEmpty, actual) -} - -// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not -// calling len(actual) would return a value greater than zero. It obeys the rules -// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len -func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - - if empty := ShouldBeEmpty(actual, expected...); empty != success { - return success - } - return fmt.Sprintf(shouldNotHaveBeenEmpty, actual) -} - -// ShouldHaveLength receives 2 parameters. The first is a collection to check -// the length of, the second being the expected length. It obeys the rules -// specified by the len function for determining length: -// http://golang.org/pkg/builtin/#len -func ShouldHaveLength(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - var expectedLen int64 - lenValue := reflect.ValueOf(expected[0]) - switch lenValue.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - expectedLen = lenValue.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - expectedLen = int64(lenValue.Uint()) - default: - return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0])) - } - - if expectedLen < 0 { - return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0]) - } - - value := reflect.ValueOf(actual) - switch value.Kind() { - case reflect.Slice, - reflect.Chan, - reflect.Map, - reflect.String: - if int64(value.Len()) == expectedLen { - return success - } else { - return fmt.Sprintf(shouldHaveHadLength, actual, value.Len(), expectedLen) - } - case reflect.Ptr: - elem := value.Elem() - kind := elem.Kind() - if kind == reflect.Slice || kind == reflect.Array { - if int64(elem.Len()) == expectedLen { - return success - } else { - return fmt.Sprintf(shouldHaveHadLength, actual, elem.Len(), expectedLen) - } - } - } - return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual)) -} diff --git a/vendor/github.com/smartystreets/assertions/doc.go b/vendor/github.com/smartystreets/assertions/doc.go deleted file mode 100755 index 756211c..0000000 --- a/vendor/github.com/smartystreets/assertions/doc.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package assertions contains the implementations for all assertions which -// are referenced in goconvey's `convey` package -// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit) -// for use with the So(...) method. -// They can also be used in traditional Go test functions and even in -// applications. 
-// -// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library. -// (https://github.com/jacobsa/oglematchers) -// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library. -// (https://github.com/luci/go-render) -package assertions - -import ( - "fmt" - "runtime" -) - -// By default we use a no-op serializer. The actual Serializer provides a JSON -// representation of failure results on selected assertions so the goconvey -// web UI can display a convenient diff. -var serializer Serializer = new(noopSerializer) - -// GoConveyMode provides control over JSON serialization of failures. When -// using the assertions in this package from the convey package JSON results -// are very helpful and can be rendered in a DIFF view. In that case, this function -// will be called with a true value to enable the JSON serialization. By default, -// the assertions in this package will not serializer a JSON result, making -// standalone ussage more convenient. -func GoConveyMode(yes bool) { - if yes { - serializer = newSerializer() - } else { - serializer = new(noopSerializer) - } -} - -type testingT interface { - Error(args ...interface{}) -} - -type Assertion struct { - t testingT - failed bool -} - -// New swallows the *testing.T struct and prints failed assertions using t.Error. -// Example: assertions.New(t).So(1, should.Equal, 1) -func New(t testingT) *Assertion { - return &Assertion{t: t} -} - -// Failed reports whether any calls to So (on this Assertion instance) have failed. -func (this *Assertion) Failed() bool { - return this.failed -} - -// So calls the standalone So function and additionally, calls t.Error in failure scenarios. -func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool { - ok, result := So(actual, assert, expected...) - if !ok { - this.failed = true - _, file, line, _ := runtime.Caller(1) - this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result)) - } - return ok -} - -// So is a convenience function (as opposed to an inconvenience function?) -// for running assertions on arbitrary arguments in any context, be it for testing or even -// application logging. It allows you to perform assertion-like behavior (and get nicely -// formatted messages detailing discrepancies) but without the program blowing up or panicking. -// All that is required is to import this package and call `So` with one of the assertions -// exported by this package as the second parameter. -// The first return parameter is a boolean indicating if the assertion was true. The second -// return parameter is the well-formatted message showing why an assertion was incorrect, or -// blank if the assertion was correct. -// -// Example: -// -// if ok, message := So(x, ShouldBeGreaterThan, y); !ok { -// log.Println(message) -// } -// -func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) { - if result := so(actual, assert, expected...); len(result) == 0 { - return true, result - } else { - return false, result - } -} - -// so is like So, except that it only returns the string message, which is blank if the -// assertion passed. Used to facilitate testing. -func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string { - return assert(actual, expected...) -} - -// assertion is an alias for a function with a signature that the So() -// function can handle. 
Any future or custom assertions should conform to this -// method signature. The return value should be an empty string if the assertion -// passes and a well-formed failure message if not. -type assertion func(actual interface{}, expected ...interface{}) string - -//////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/smartystreets/assertions/equality.go b/vendor/github.com/smartystreets/assertions/equality.go deleted file mode 100755 index d40231e..0000000 --- a/vendor/github.com/smartystreets/assertions/equality.go +++ /dev/null @@ -1,280 +0,0 @@ -package assertions - -import ( - "errors" - "fmt" - "math" - "reflect" - "strings" - - "github.com/smartystreets/assertions/internal/oglematchers" - "github.com/smartystreets/assertions/internal/go-render/render" -) - -// default acceptable delta for ShouldAlmostEqual -const defaultDelta = 0.0000000001 - -// ShouldEqual receives exactly two parameters and does an equality check. -func ShouldEqual(actual interface{}, expected ...interface{}) string { - if message := need(1, expected); message != success { - return message - } - return shouldEqual(actual, expected[0]) -} -func shouldEqual(actual, expected interface{}) (message string) { - defer func() { - if r := recover(); r != nil { - message = serializer.serialize(expected, actual, fmt.Sprintf(shouldHaveBeenEqual, expected, actual)) - return - } - }() - - if matchError := oglematchers.Equals(expected).Matches(actual); matchError != nil { - expectedSyntax := fmt.Sprintf("%v", expected) - actualSyntax := fmt.Sprintf("%v", actual) - if expectedSyntax == actualSyntax && reflect.TypeOf(expected) != reflect.TypeOf(actual) { - message = fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual) - } else { - message = fmt.Sprintf(shouldHaveBeenEqual, expected, actual) - } - message = serializer.serialize(expected, actual, message) - return - } - - return success -} - -// ShouldNotEqual receives exactly two parameters and does an inequality check. -func ShouldNotEqual(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } else if ShouldEqual(actual, expected[0]) == success { - return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0]) - } - return success -} - -// ShouldAlmostEqual makes sure that two parameters are close enough to being equal. -// The acceptable delta may be specified with a third argument, -// or a very small default delta will be used. -func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string { - actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) - - if err != "" { - return err - } - - if math.Abs(actualFloat-expectedFloat) <= deltaFloat { - return success - } else { - return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat) - } -} - -// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual -func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string { - actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) 
- - if err != "" { - return err - } - - if math.Abs(actualFloat-expectedFloat) > deltaFloat { - return success - } else { - return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat) - } -} - -func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) { - deltaFloat := 0.0000000001 - - if len(expected) == 0 { - return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)" - } else if len(expected) == 2 { - delta, err := getFloat(expected[1]) - - if err != nil { - return 0.0, 0.0, 0.0, "delta must be a numerical type" - } - - deltaFloat = delta - } else if len(expected) > 2 { - return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)" - } - - actualFloat, err := getFloat(actual) - - if err != nil { - return 0.0, 0.0, 0.0, err.Error() - } - - expectedFloat, err := getFloat(expected[0]) - - if err != nil { - return 0.0, 0.0, 0.0, err.Error() - } - - return actualFloat, expectedFloat, deltaFloat, "" -} - -// returns the float value of any real number, or error if it is not a numerical type -func getFloat(num interface{}) (float64, error) { - numValue := reflect.ValueOf(num) - numKind := numValue.Kind() - - if numKind == reflect.Int || - numKind == reflect.Int8 || - numKind == reflect.Int16 || - numKind == reflect.Int32 || - numKind == reflect.Int64 { - return float64(numValue.Int()), nil - } else if numKind == reflect.Uint || - numKind == reflect.Uint8 || - numKind == reflect.Uint16 || - numKind == reflect.Uint32 || - numKind == reflect.Uint64 { - return float64(numValue.Uint()), nil - } else if numKind == reflect.Float32 || - numKind == reflect.Float64 { - return numValue.Float(), nil - } else { - return 0.0, errors.New("must be a numerical type, but was " + numKind.String()) - } -} - -// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual) -func ShouldResemble(actual interface{}, expected ...interface{}) string { - if message := need(1, expected); message != success { - return message - } - - if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil { - return serializer.serializeDetailed(expected[0], actual, - fmt.Sprintf(shouldHaveResembled, render.Render(expected[0]), render.Render(actual))) - } - - return success -} - -// ShouldNotResemble receives exactly two parameters and does an inverse deep equal check (see reflect.DeepEqual) -func ShouldNotResemble(actual interface{}, expected ...interface{}) string { - if message := need(1, expected); message != success { - return message - } else if ShouldResemble(actual, expected[0]) == success { - return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0])) - } - return success -} - -// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address. 
-func ShouldPointTo(actual interface{}, expected ...interface{}) string { - if message := need(1, expected); message != success { - return message - } - return shouldPointTo(actual, expected[0]) - -} -func shouldPointTo(actual, expected interface{}) string { - actualValue := reflect.ValueOf(actual) - expectedValue := reflect.ValueOf(expected) - - if ShouldNotBeNil(actual) != success { - return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil") - } else if ShouldNotBeNil(expected) != success { - return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil") - } else if actualValue.Kind() != reflect.Ptr { - return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not") - } else if expectedValue.Kind() != reflect.Ptr { - return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not") - } else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success { - actualAddress := reflect.ValueOf(actual).Pointer() - expectedAddress := reflect.ValueOf(expected).Pointer() - return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo, - actual, actualAddress, - expected, expectedAddress)) - } - return success -} - -// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresess. -func ShouldNotPointTo(actual interface{}, expected ...interface{}) string { - if message := need(1, expected); message != success { - return message - } - compare := ShouldPointTo(actual, expected[0]) - if strings.HasPrefix(compare, shouldBePointers) { - return compare - } else if compare == success { - return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer()) - } - return success -} - -// ShouldBeNil receives a single parameter and ensures that it is nil. -func ShouldBeNil(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } else if actual == nil { - return success - } else if interfaceHasNilValue(actual) { - return success - } - return fmt.Sprintf(shouldHaveBeenNil, actual) -} -func interfaceHasNilValue(actual interface{}) bool { - value := reflect.ValueOf(actual) - kind := value.Kind() - nilable := kind == reflect.Slice || - kind == reflect.Chan || - kind == reflect.Func || - kind == reflect.Ptr || - kind == reflect.Map - - // Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr - // Reference: http://golang.org/pkg/reflect/#Value.IsNil - return nilable && value.IsNil() -} - -// ShouldNotBeNil receives a single parameter and ensures that it is not nil. -func ShouldNotBeNil(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } else if ShouldBeNil(actual) == success { - return fmt.Sprintf(shouldNotHaveBeenNil, actual) - } - return success -} - -// ShouldBeTrue receives a single parameter and ensures that it is true. -func ShouldBeTrue(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } else if actual != true { - return fmt.Sprintf(shouldHaveBeenTrue, actual) - } - return success -} - -// ShouldBeFalse receives a single parameter and ensures that it is false. 
-func ShouldBeFalse(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } else if actual != false { - return fmt.Sprintf(shouldHaveBeenFalse, actual) - } - return success -} - -// ShouldBeZeroValue receives a single parameter and ensures that it is -// the Go equivalent of the default value, or "zero" value. -func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface() - if !reflect.DeepEqual(zeroVal, actual) { - return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual)) - } - return success -} diff --git a/vendor/github.com/smartystreets/assertions/filter.go b/vendor/github.com/smartystreets/assertions/filter.go deleted file mode 100755 index ab51979..0000000 --- a/vendor/github.com/smartystreets/assertions/filter.go +++ /dev/null @@ -1,23 +0,0 @@ -package assertions - -import "fmt" - -const ( - success = "" - needExactValues = "This assertion requires exactly %d comparison values (you provided %d)." - needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)." -) - -func need(needed int, expected []interface{}) string { - if len(expected) != needed { - return fmt.Sprintf(needExactValues, needed, len(expected)) - } - return success -} - -func atLeast(minimum int, expected []interface{}) string { - if len(expected) < 1 { - return needNonEmptyCollection - } - return success -} diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE b/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE deleted file mode 100755 index d41c3fc..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2015 The Chromium Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
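Editor's note: doc.go above defines the assertion function shape that So accepts, and filter.go shows the empty-string success convention together with the need/atLeast argument-count guards. A hedged sketch of a user-defined assertion following that convention (ShouldBeEven is a hypothetical example, not part of the vendored library):

```go
package main

import (
	"fmt"
	"log"

	"github.com/smartystreets/assertions"
)

// ShouldBeEven is a hypothetical custom assertion. It follows the library's
// convention: return "" on success and a descriptive message on failure, and
// match the func(actual interface{}, expected ...interface{}) string shape
// so it can be passed to So.
func ShouldBeEven(actual interface{}, expected ...interface{}) string {
	if len(expected) != 0 {
		return fmt.Sprintf("This assertion requires exactly 0 comparison values (you provided %d).", len(expected))
	}
	n, ok := actual.(int)
	if !ok {
		return fmt.Sprintf("Expected an int, got %T", actual)
	}
	if n%2 != 0 {
		return fmt.Sprintf("Expected %d to be even", n)
	}
	return ""
}

func main() {
	if ok, msg := assertions.So(7, ShouldBeEven); !ok {
		log.Println(msg) // Expected 7 to be even
	}
}
```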
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go deleted file mode 100755 index b1387f7..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2015 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package render - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" -) - -var builtinTypeMap = map[reflect.Kind]string{ - reflect.Bool: "bool", - reflect.Complex128: "complex128", - reflect.Complex64: "complex64", - reflect.Float32: "float32", - reflect.Float64: "float64", - reflect.Int16: "int16", - reflect.Int32: "int32", - reflect.Int64: "int64", - reflect.Int8: "int8", - reflect.Int: "int", - reflect.String: "string", - reflect.Uint16: "uint16", - reflect.Uint32: "uint32", - reflect.Uint64: "uint64", - reflect.Uint8: "uint8", - reflect.Uint: "uint", - reflect.Uintptr: "uintptr", -} - -var builtinTypeSet = map[string]struct{}{} - -func init() { - for _, v := range builtinTypeMap { - builtinTypeSet[v] = struct{}{} - } -} - -var typeOfString = reflect.TypeOf("") -var typeOfInt = reflect.TypeOf(int(1)) -var typeOfUint = reflect.TypeOf(uint(1)) -var typeOfFloat = reflect.TypeOf(10.1) - -// Render converts a structure to a string representation. Unline the "%#v" -// format string, this resolves pointer types' contents in structs, maps, and -// slices/arrays and prints their field values. -func Render(v interface{}) string { - buf := bytes.Buffer{} - s := (*traverseState)(nil) - s.render(&buf, 0, reflect.ValueOf(v), false) - return buf.String() -} - -// renderPointer is called to render a pointer value. -// -// This is overridable so that the test suite can have deterministic pointer -// values in its expectations. -var renderPointer = func(buf *bytes.Buffer, p uintptr) { - fmt.Fprintf(buf, "0x%016x", p) -} - -// traverseState is used to note and avoid recursion as struct members are being -// traversed. -// -// traverseState is allowed to be nil. Specifically, the root state is nil. -type traverseState struct { - parent *traverseState - ptr uintptr -} - -func (s *traverseState) forkFor(ptr uintptr) *traverseState { - for cur := s; cur != nil; cur = cur.parent { - if ptr == cur.ptr { - return nil - } - } - - fs := &traverseState{ - parent: s, - ptr: ptr, - } - return fs -} - -func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, implicit bool) { - if v.Kind() == reflect.Invalid { - buf.WriteString("nil") - return - } - vt := v.Type() - - // If the type being rendered is a potentially recursive type (a type that - // can contain itself as a member), we need to avoid recursion. - // - // If we've already seen this type before, mark that this is the case and - // write a recursion placeholder instead of actually rendering it. - // - // If we haven't seen it before, fork our `seen` tracking so any higher-up - // renderers will also render it at least once, then mark that we've seen it - // to avoid recursing on lower layers. - pe := uintptr(0) - vk := vt.Kind() - switch vk { - case reflect.Ptr: - // Since structs and arrays aren't pointers, they can't directly be - // recursed, but they can contain pointers to themselves. Record their - // pointer to avoid this. 
- switch v.Elem().Kind() { - case reflect.Struct, reflect.Array: - pe = v.Pointer() - } - - case reflect.Slice, reflect.Map: - pe = v.Pointer() - } - if pe != 0 { - s = s.forkFor(pe) - if s == nil { - buf.WriteString("") - return - } - } - - isAnon := func(t reflect.Type) bool { - if t.Name() != "" { - if _, ok := builtinTypeSet[t.Name()]; !ok { - return false - } - } - return t.Kind() != reflect.Interface - } - - switch vk { - case reflect.Struct: - if !implicit { - writeType(buf, ptrs, vt) - } - structAnon := vt.Name() == "" - buf.WriteRune('{') - for i := 0; i < vt.NumField(); i++ { - if i > 0 { - buf.WriteString(", ") - } - anon := structAnon && isAnon(vt.Field(i).Type) - - if !anon { - buf.WriteString(vt.Field(i).Name) - buf.WriteRune(':') - } - - s.render(buf, 0, v.Field(i), anon) - } - buf.WriteRune('}') - - case reflect.Slice: - if v.IsNil() { - if !implicit { - writeType(buf, ptrs, vt) - buf.WriteString("(nil)") - } else { - buf.WriteString("nil") - } - return - } - fallthrough - - case reflect.Array: - if !implicit { - writeType(buf, ptrs, vt) - } - anon := vt.Name() == "" && isAnon(vt.Elem()) - buf.WriteString("{") - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteString(", ") - } - - s.render(buf, 0, v.Index(i), anon) - } - buf.WriteRune('}') - - case reflect.Map: - if !implicit { - writeType(buf, ptrs, vt) - } - if v.IsNil() { - buf.WriteString("(nil)") - } else { - buf.WriteString("{") - - mkeys := v.MapKeys() - tryAndSortMapKeys(vt, mkeys) - - kt := vt.Key() - keyAnon := typeOfString.ConvertibleTo(kt) || typeOfInt.ConvertibleTo(kt) || typeOfUint.ConvertibleTo(kt) || typeOfFloat.ConvertibleTo(kt) - valAnon := vt.Name() == "" && isAnon(vt.Elem()) - for i, mk := range mkeys { - if i > 0 { - buf.WriteString(", ") - } - - s.render(buf, 0, mk, keyAnon) - buf.WriteString(":") - s.render(buf, 0, v.MapIndex(mk), valAnon) - } - buf.WriteRune('}') - } - - case reflect.Ptr: - ptrs++ - fallthrough - case reflect.Interface: - if v.IsNil() { - writeType(buf, ptrs, v.Type()) - buf.WriteString("(nil)") - } else { - s.render(buf, ptrs, v.Elem(), false) - } - - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - writeType(buf, ptrs, vt) - buf.WriteRune('(') - renderPointer(buf, v.Pointer()) - buf.WriteRune(')') - - default: - tstr := vt.String() - implicit = implicit || (ptrs == 0 && builtinTypeMap[vk] == tstr) - if !implicit { - writeType(buf, ptrs, vt) - buf.WriteRune('(') - } - - switch vk { - case reflect.String: - fmt.Fprintf(buf, "%q", v.String()) - case reflect.Bool: - fmt.Fprintf(buf, "%v", v.Bool()) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - fmt.Fprintf(buf, "%d", v.Int()) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - fmt.Fprintf(buf, "%d", v.Uint()) - - case reflect.Float32, reflect.Float64: - fmt.Fprintf(buf, "%g", v.Float()) - - case reflect.Complex64, reflect.Complex128: - fmt.Fprintf(buf, "%g", v.Complex()) - } - - if !implicit { - buf.WriteRune(')') - } - } -} - -func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) { - parens := ptrs > 0 - switch t.Kind() { - case reflect.Chan, reflect.Func, reflect.UnsafePointer: - parens = true - } - - if parens { - buf.WriteRune('(') - for i := 0; i < ptrs; i++ { - buf.WriteRune('*') - } - } - - switch t.Kind() { - case reflect.Ptr: - if ptrs == 0 { - // This pointer was referenced from within writeType (e.g., as part of - // rendering a list), and so hasn't had its pointer asterisk accounted - // for. 
- buf.WriteRune('*') - } - writeType(buf, 0, t.Elem()) - - case reflect.Interface: - if n := t.Name(); n != "" { - buf.WriteString(t.String()) - } else { - buf.WriteString("interface{}") - } - - case reflect.Array: - buf.WriteRune('[') - buf.WriteString(strconv.FormatInt(int64(t.Len()), 10)) - buf.WriteRune(']') - writeType(buf, 0, t.Elem()) - - case reflect.Slice: - if t == reflect.SliceOf(t.Elem()) { - buf.WriteString("[]") - writeType(buf, 0, t.Elem()) - } else { - // Custom slice type, use type name. - buf.WriteString(t.String()) - } - - case reflect.Map: - if t == reflect.MapOf(t.Key(), t.Elem()) { - buf.WriteString("map[") - writeType(buf, 0, t.Key()) - buf.WriteRune(']') - writeType(buf, 0, t.Elem()) - } else { - // Custom map type, use type name. - buf.WriteString(t.String()) - } - - default: - buf.WriteString(t.String()) - } - - if parens { - buf.WriteRune(')') - } -} - -type cmpFn func(a, b reflect.Value) int - -type sortableValueSlice struct { - cmp cmpFn - elements []reflect.Value -} - -func (s sortableValueSlice) Len() int { - return len(s.elements) -} - -func (s sortableValueSlice) Less(i, j int) bool { - return s.cmp(s.elements[i], s.elements[j]) < 0 -} - -func (s sortableValueSlice) Swap(i, j int) { - s.elements[i], s.elements[j] = s.elements[j], s.elements[i] -} - -// cmpForType returns a cmpFn which sorts the data for some type t in the same -// order that a go-native map key is compared for equality. -func cmpForType(t reflect.Type) cmpFn { - switch t.Kind() { - case reflect.String: - return func(av, bv reflect.Value) int { - a, b := av.String(), bv.String() - if a < b { - return -1 - } else if a > b { - return 1 - } - return 0 - } - - case reflect.Bool: - return func(av, bv reflect.Value) int { - a, b := av.Bool(), bv.Bool() - if !a && b { - return -1 - } else if a && !b { - return 1 - } - return 0 - } - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return func(av, bv reflect.Value) int { - a, b := av.Int(), bv.Int() - if a < b { - return -1 - } else if a > b { - return 1 - } - return 0 - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, reflect.Uintptr, reflect.UnsafePointer: - return func(av, bv reflect.Value) int { - a, b := av.Uint(), bv.Uint() - if a < b { - return -1 - } else if a > b { - return 1 - } - return 0 - } - - case reflect.Float32, reflect.Float64: - return func(av, bv reflect.Value) int { - a, b := av.Float(), bv.Float() - if a < b { - return -1 - } else if a > b { - return 1 - } - return 0 - } - - case reflect.Interface: - return func(av, bv reflect.Value) int { - a, b := av.InterfaceData(), bv.InterfaceData() - if a[0] < b[0] { - return -1 - } else if a[0] > b[0] { - return 1 - } - if a[1] < b[1] { - return -1 - } else if a[1] > b[1] { - return 1 - } - return 0 - } - - case reflect.Complex64, reflect.Complex128: - return func(av, bv reflect.Value) int { - a, b := av.Complex(), bv.Complex() - if real(a) < real(b) { - return -1 - } else if real(a) > real(b) { - return 1 - } - if imag(a) < imag(b) { - return -1 - } else if imag(a) > imag(b) { - return 1 - } - return 0 - } - - case reflect.Ptr, reflect.Chan: - return func(av, bv reflect.Value) int { - a, b := av.Pointer(), bv.Pointer() - if a < b { - return -1 - } else if a > b { - return 1 - } - return 0 - } - - case reflect.Struct: - cmpLst := make([]cmpFn, t.NumField()) - for i := range cmpLst { - cmpLst[i] = cmpForType(t.Field(i).Type) - } - return func(a, b reflect.Value) int { - for i, cmp := range cmpLst { - if rslt := 
cmp(a.Field(i), b.Field(i)); rslt != 0 { - return rslt - } - } - return 0 - } - } - - return nil -} - -func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) { - if cmp := cmpForType(mt.Key()); cmp != nil { - sort.Sort(sortableValueSlice{cmp, k}) - } -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE b/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE deleted file mode 100755 index 75b5248..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md b/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md deleted file mode 100755 index 9186d57..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md +++ /dev/null @@ -1,58 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers) - -`oglematchers` is a package for the Go programming language containing a set of -matchers, useful in a testing or mocking framework, inspired by and mostly -compatible with [Google Test][googletest] for C++ and -[Google JS Test][google-js-test]. The package is used by the -[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking -framework, which may be more directly useful to you, but can be generically used -elsewhere as well. - -A "matcher" is simply an object with a `Matches` method defining a set of golang -values matched by the matcher, and a `Description` method describing that set. -For example, here are some matchers: - -```go -// Numbers -Equals(17.13) -LessThan(19) - -// Strings -Equals("taco") -HasSubstr("burrito") -MatchesRegex("t.*o") - -// Combining matchers -AnyOf(LessThan(17), GreaterThan(19)) -``` - -There are lots more; see [here][reference] for a reference. You can also add -your own simply by implementing the `oglematchers.Matcher` interface. - - -Installation ------------- - -First, make sure you have installed Go 1.0.2 or newer. See -[here][golang-install] for instructions. - -Use the following command to install `oglematchers` and keep it up to date: - - go get -u github.com/smartystreets/assertions/internal/oglematchers - - -Documentation -------------- - -See [here][reference] for documentation. 
Alternatively, you can install the -package and then use `godoc`: - - godoc github.com/smartystreets/assertions/internal/oglematchers - - -[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers -[golang-install]: http://golang.org/doc/install.html -[googletest]: http://code.google.com/p/googletest/ -[google-js-test]: http://code.google.com/p/google-js-test/ -[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest -[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of.go deleted file mode 100755 index e42c509..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "strings" -) - -// AllOf accepts a set of matchers S and returns a matcher that follows the -// algorithm below when considering a candidate c: -// -// 1. Return true if for every Matcher m in S, m matches c. -// -// 2. Otherwise, if there is a matcher m in S such that m returns a fatal -// error for c, return that matcher's error message. -// -// 3. Otherwise, return false with the error from some wrapped matcher. -// -// This is akin to a logical AND operation for matchers. -func AllOf(matchers ...Matcher) Matcher { - return &allOfMatcher{matchers} -} - -type allOfMatcher struct { - wrappedMatchers []Matcher -} - -func (m *allOfMatcher) Description() string { - // Special case: the empty set. - if len(m.wrappedMatchers) == 0 { - return "is anything" - } - - // Join the descriptions for the wrapped matchers. - wrappedDescs := make([]string, len(m.wrappedMatchers)) - for i, wrappedMatcher := range m.wrappedMatchers { - wrappedDescs[i] = wrappedMatcher.Description() - } - - return strings.Join(wrappedDescs, ", and ") -} - -func (m *allOfMatcher) Matches(c interface{}) (err error) { - for _, wrappedMatcher := range m.wrappedMatchers { - if wrappedErr := wrappedMatcher.Matches(c); wrappedErr != nil { - err = wrappedErr - - // If the error is fatal, return immediately with this error. - _, ok := wrappedErr.(*FatalError) - if ok { - return - } - } - } - - return -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any.go deleted file mode 100755 index 2fc88b5..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -// Any returns a matcher that matches any value. -func Any() Matcher { - return &anyMatcher{} -} - -type anyMatcher struct { -} - -func (m *anyMatcher) Description() string { - return "is anything" -} - -func (m *anyMatcher) Matches(c interface{}) error { - return nil -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go deleted file mode 100755 index fa3a309..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -// AnyOf accepts a set of values S and returns a matcher that follows the -// algorithm below when considering a candidate c: -// -// 1. If there exists a value m in S such that m implements the Matcher -// interface and m matches c, return true. -// -// 2. Otherwise, if there exists a value v in S such that v does not implement -// the Matcher interface and the matcher Equals(v) matches c, return true. -// -// 3. Otherwise, if there is a value m in S such that m implements the Matcher -// interface and m returns a fatal error for c, return that fatal error. -// -// 4. Otherwise, return false. -// -// This is akin to a logical OR operation for matchers, with non-matchers x -// being treated as Equals(x). -func AnyOf(vals ...interface{}) Matcher { - // Get ahold of a type variable for the Matcher interface. - var dummy *Matcher - matcherType := reflect.TypeOf(dummy).Elem() - - // Create a matcher for each value, or use the value itself if it's already a - // matcher. - wrapped := make([]Matcher, len(vals)) - for i, v := range vals { - t := reflect.TypeOf(v) - if t != nil && t.Implements(matcherType) { - wrapped[i] = v.(Matcher) - } else { - wrapped[i] = Equals(v) - } - } - - return &anyOfMatcher{wrapped} -} - -type anyOfMatcher struct { - wrapped []Matcher -} - -func (m *anyOfMatcher) Description() string { - wrappedDescs := make([]string, len(m.wrapped)) - for i, matcher := range m.wrapped { - wrappedDescs[i] = matcher.Description() - } - - return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", ")) -} - -func (m *anyOfMatcher) Matches(c interface{}) (err error) { - err = errors.New("") - - // Try each matcher in turn. 
- for _, matcher := range m.wrapped { - wrappedErr := matcher.Matches(c) - - // Return immediately if there's a match. - if wrappedErr == nil { - err = nil - return - } - - // Note the fatal error, if any. - if _, isFatal := wrappedErr.(*FatalError); isFatal { - err = wrappedErr - } - } - - return -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go deleted file mode 100755 index a79cc95..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "fmt" - "reflect" -) - -// Return a matcher that matches arrays slices with at least one element that -// matches the supplied argument. If the argument x is not itself a Matcher, -// this is equivalent to Contains(Equals(x)). -func Contains(x interface{}) Matcher { - var result containsMatcher - var ok bool - - if result.elementMatcher, ok = x.(Matcher); !ok { - result.elementMatcher = DeepEquals(x) - } - - return &result -} - -type containsMatcher struct { - elementMatcher Matcher -} - -func (m *containsMatcher) Description() string { - return fmt.Sprintf("contains: %s", m.elementMatcher.Description()) -} - -func (m *containsMatcher) Matches(candidate interface{}) error { - // The candidate must be a slice or an array. - v := reflect.ValueOf(candidate) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return NewFatalError("which is not a slice or array") - } - - // Check each element. - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil { - return nil - } - } - - return fmt.Errorf("") -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go deleted file mode 100755 index 755ea2f..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package oglematchers - -import ( - "bytes" - "errors" - "fmt" - "reflect" -) - -var byteSliceType reflect.Type = reflect.TypeOf([]byte{}) - -// DeepEquals returns a matcher that matches based on 'deep equality', as -// defined by the reflect package. This matcher requires that values have -// identical types to x. -func DeepEquals(x interface{}) Matcher { - return &deepEqualsMatcher{x} -} - -type deepEqualsMatcher struct { - x interface{} -} - -func (m *deepEqualsMatcher) Description() string { - xDesc := fmt.Sprintf("%v", m.x) - xValue := reflect.ValueOf(m.x) - - // Special case: fmt.Sprintf presents nil slices as "[]", but - // reflect.DeepEqual makes a distinction between nil and empty slices. Make - // this less confusing. - if xValue.Kind() == reflect.Slice && xValue.IsNil() { - xDesc = "" - } - - return fmt.Sprintf("deep equals: %s", xDesc) -} - -func (m *deepEqualsMatcher) Matches(c interface{}) error { - // Make sure the types match. - ct := reflect.TypeOf(c) - xt := reflect.TypeOf(m.x) - - if ct != xt { - return NewFatalError(fmt.Sprintf("which is of type %v", ct)) - } - - // Special case: handle byte slices more efficiently. - cValue := reflect.ValueOf(c) - xValue := reflect.ValueOf(m.x) - - if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() { - xBytes := m.x.([]byte) - cBytes := c.([]byte) - - if bytes.Equal(cBytes, xBytes) { - return nil - } - - return errors.New("") - } - - // Defer to the reflect package. - if reflect.DeepEqual(m.x, c) { - return nil - } - - // Special case: if the comparison failed because c is the nil slice, given - // an indication of this (since its value is printed as "[]"). - if cValue.Kind() == reflect.Slice && cValue.IsNil() { - return errors.New("which is nil") - } - - return errors.New("") -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go deleted file mode 100755 index 9ddb974..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -// Given a list of arguments M, ElementsAre returns a matcher that matches -// arrays and slices A where all of the following hold: -// -// * A is the same length as M. -// -// * For each i < len(A) where M[i] is a matcher, A[i] matches M[i]. -// -// * For each i < len(A) where M[i] is not a matcher, A[i] matches -// Equals(M[i]). -// -func ElementsAre(M ...interface{}) Matcher { - // Copy over matchers, or convert to Equals(x) for non-matcher x. 
- subMatchers := make([]Matcher, len(M)) - for i, x := range M { - if matcher, ok := x.(Matcher); ok { - subMatchers[i] = matcher - continue - } - - subMatchers[i] = Equals(x) - } - - return &elementsAreMatcher{subMatchers} -} - -type elementsAreMatcher struct { - subMatchers []Matcher -} - -func (m *elementsAreMatcher) Description() string { - subDescs := make([]string, len(m.subMatchers)) - for i, sm := range m.subMatchers { - subDescs[i] = sm.Description() - } - - return fmt.Sprintf("elements are: [%s]", strings.Join(subDescs, ", ")) -} - -func (m *elementsAreMatcher) Matches(candidates interface{}) error { - // The candidate must be a slice or an array. - v := reflect.ValueOf(candidates) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return NewFatalError("which is not a slice or array") - } - - // The length must be correct. - if v.Len() != len(m.subMatchers) { - return errors.New(fmt.Sprintf("which is of length %d", v.Len())) - } - - // Check each element. - for i, subMatcher := range m.subMatchers { - c := v.Index(i) - if matchErr := subMatcher.Matches(c.Interface()); matchErr != nil { - // Return an errors indicating which element doesn't match. If the - // matcher error was fatal, make this one fatal too. - err := errors.New(fmt.Sprintf("whose element %d doesn't match", i)) - if _, isFatal := matchErr.(*FatalError); isFatal { - err = NewFatalError(err.Error()) - } - - return err - } - } - - return nil -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go deleted file mode 100755 index ee93e33..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -// Equals(x) returns a matcher that matches values v such that v and x are -// equivalent. This includes the case when the comparison v == x using Go's -// built-in comparison operator is legal (except for structs, which this -// matcher does not support), but for convenience the following rules also -// apply: -// -// * Type checking is done based on underlying types rather than actual -// types, so that e.g. two aliases for string can be compared: -// -// type stringAlias1 string -// type stringAlias2 string -// -// a := "taco" -// b := stringAlias1("taco") -// c := stringAlias2("taco") -// -// ExpectTrue(a == b) // Legal, passes -// ExpectTrue(b == c) // Illegal, doesn't compile -// -// ExpectThat(a, Equals(b)) // Passes -// ExpectThat(b, Equals(c)) // Passes -// -// * Values of numeric type are treated as if they were abstract numbers, and -// compared accordingly. Therefore Equals(17) will match int(17), -// int16(17), uint(17), float32(17), complex64(17), and so on. 
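A minimal sketch of the "abstract numbers" rule described above. This is not part of the deleted file; it assumes the package is imported via its original public path github.com/jacobsa/oglematchers, since the copy removed by this patch is vendored under an internal path and cannot be imported directly. Matches returns nil exactly when the candidate belongs to the matcher's set.

```go
package main

import (
	"fmt"

	// Assumed import path: the original, non-vendored home of this package.
	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.Equals(17)

	// Per the rule above, every one of these numeric representations of 17 matches.
	for _, c := range []interface{}{int(17), int16(17), uint(17), float32(17), complex64(17)} {
		// Matches returns nil if and only if the candidate is in the matcher's set.
		fmt.Printf("%T -> matches: %v\n", c, m.Matches(c) == nil)
	}
}
```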
-// -// If you want a stricter matcher that contains no such cleverness, see -// IdenticalTo instead. -// -// Arrays are supported by this matcher, but do not participate in the -// exceptions above. Two arrays compared with this matcher must have identical -// types, and their element type must itself be comparable according to Go's == -// operator. -func Equals(x interface{}) Matcher { - v := reflect.ValueOf(x) - - // This matcher doesn't support structs. - if v.Kind() == reflect.Struct { - panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind())) - } - - // The == operator is not defined for non-nil slices. - if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) { - panic(fmt.Sprintf("oglematchers.Equals: non-nil slice")) - } - - return &equalsMatcher{v} -} - -type equalsMatcher struct { - expectedValue reflect.Value -} - -//////////////////////////////////////////////////////////////////////// -// Numeric types -//////////////////////////////////////////////////////////////////////// - -func isSignedInteger(v reflect.Value) bool { - k := v.Kind() - return k >= reflect.Int && k <= reflect.Int64 -} - -func isUnsignedInteger(v reflect.Value) bool { - k := v.Kind() - return k >= reflect.Uint && k <= reflect.Uintptr -} - -func isInteger(v reflect.Value) bool { - return isSignedInteger(v) || isUnsignedInteger(v) -} - -func isFloat(v reflect.Value) bool { - k := v.Kind() - return k == reflect.Float32 || k == reflect.Float64 -} - -func isComplex(v reflect.Value) bool { - k := v.Kind() - return k == reflect.Complex64 || k == reflect.Complex128 -} - -func checkAgainstInt64(e int64, c reflect.Value) (err error) { - err = errors.New("") - - switch { - case isSignedInteger(c): - if c.Int() == e { - err = nil - } - - case isUnsignedInteger(c): - u := c.Uint() - if u <= math.MaxInt64 && int64(u) == e { - err = nil - } - - // Turn around the various floating point types so that the checkAgainst* - // functions for them can deal with precision issues. - case isFloat(c), isComplex(c): - return Equals(c.Interface()).Matches(e) - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -func checkAgainstUint64(e uint64, c reflect.Value) (err error) { - err = errors.New("") - - switch { - case isSignedInteger(c): - i := c.Int() - if i >= 0 && uint64(i) == e { - err = nil - } - - case isUnsignedInteger(c): - if c.Uint() == e { - err = nil - } - - // Turn around the various floating point types so that the checkAgainst* - // functions for them can deal with precision issues. - case isFloat(c), isComplex(c): - return Equals(c.Interface()).Matches(e) - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -func checkAgainstFloat32(e float32, c reflect.Value) (err error) { - err = errors.New("") - - switch { - case isSignedInteger(c): - if float32(c.Int()) == e { - err = nil - } - - case isUnsignedInteger(c): - if float32(c.Uint()) == e { - err = nil - } - - case isFloat(c): - // Compare using float32 to avoid a false sense of precision; otherwise - // e.g. Equals(float32(0.1)) won't match float32(0.1). - if float32(c.Float()) == e { - err = nil - } - - case isComplex(c): - comp := c.Complex() - rl := real(comp) - im := imag(comp) - - // Compare using float32 to avoid a false sense of precision; otherwise - // e.g. Equals(float32(0.1)) won't match (0.1 + 0i). 
- if im == 0 && float32(rl) == e { - err = nil - } - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -func checkAgainstFloat64(e float64, c reflect.Value) (err error) { - err = errors.New("") - - ck := c.Kind() - - switch { - case isSignedInteger(c): - if float64(c.Int()) == e { - err = nil - } - - case isUnsignedInteger(c): - if float64(c.Uint()) == e { - err = nil - } - - // If the actual value is lower precision, turn the comparison around so we - // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match - // float32(0.1). - case ck == reflect.Float32 || ck == reflect.Complex64: - return Equals(c.Interface()).Matches(e) - - // Otherwise, compare with double precision. - case isFloat(c): - if c.Float() == e { - err = nil - } - - case isComplex(c): - comp := c.Complex() - rl := real(comp) - im := imag(comp) - - if im == 0 && rl == e { - err = nil - } - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -func checkAgainstComplex64(e complex64, c reflect.Value) (err error) { - err = errors.New("") - realPart := real(e) - imaginaryPart := imag(e) - - switch { - case isInteger(c) || isFloat(c): - // If we have no imaginary part, then we should just compare against the - // real part. Otherwise, we can't be equal. - if imaginaryPart != 0 { - return - } - - return checkAgainstFloat32(realPart, c) - - case isComplex(c): - // Compare using complex64 to avoid a false sense of precision; otherwise - // e.g. Equals(0.1 + 0i) won't match float32(0.1). - if complex64(c.Complex()) == e { - err = nil - } - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -func checkAgainstComplex128(e complex128, c reflect.Value) (err error) { - err = errors.New("") - realPart := real(e) - imaginaryPart := imag(e) - - switch { - case isInteger(c) || isFloat(c): - // If we have no imaginary part, then we should just compare against the - // real part. Otherwise, we can't be equal. - if imaginaryPart != 0 { - return - } - - return checkAgainstFloat64(realPart, c) - - case isComplex(c): - if c.Complex() == e { - err = nil - } - - default: - err = NewFatalError("which is not numeric") - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Other types -//////////////////////////////////////////////////////////////////////// - -func checkAgainstBool(e bool, c reflect.Value) (err error) { - if c.Kind() != reflect.Bool { - err = NewFatalError("which is not a bool") - return - } - - err = errors.New("") - if c.Bool() == e { - err = nil - } - return -} - -func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) { - // Create a description of e's type, e.g. "chan int". - typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem()) - - // Make sure c is a chan of the correct type. - if c.Kind() != reflect.Chan || - c.Type().ChanDir() != e.Type().ChanDir() || - c.Type().Elem() != e.Type().Elem() { - err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) { - // Make sure c is a function. - if c.Kind() != reflect.Func { - err = NewFatalError("which is not a function") - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) { - // Make sure c is a map. 
- if c.Kind() != reflect.Map { - err = NewFatalError("which is not a map") - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) { - // Create a description of e's type, e.g. "*int". - typeStr := fmt.Sprintf("*%v", e.Type().Elem()) - - // Make sure c is a pointer of the correct type. - if c.Kind() != reflect.Ptr || - c.Type().Elem() != e.Type().Elem() { - err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) { - // Create a description of e's type, e.g. "[]int". - typeStr := fmt.Sprintf("[]%v", e.Type().Elem()) - - // Make sure c is a slice of the correct type. - if c.Kind() != reflect.Slice || - c.Type().Elem() != e.Type().Elem() { - err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkAgainstString(e reflect.Value, c reflect.Value) (err error) { - // Make sure c is a string. - if c.Kind() != reflect.String { - err = NewFatalError("which is not a string") - return - } - - err = errors.New("") - if c.String() == e.String() { - err = nil - } - return -} - -func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) { - // Create a description of e's type, e.g. "[2]int". - typeStr := fmt.Sprintf("%v", e.Type()) - - // Make sure c is the correct type. - if c.Type() != e.Type() { - err = NewFatalError(fmt.Sprintf("which is not %s", typeStr)) - return - } - - // Check for equality. - if e.Interface() != c.Interface() { - err = errors.New("") - return - } - - return -} - -func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) { - // Make sure c is a pointer. - if c.Kind() != reflect.UnsafePointer { - err = NewFatalError("which is not a unsafe.Pointer") - return - } - - err = errors.New("") - if c.Pointer() == e.Pointer() { - err = nil - } - return -} - -func checkForNil(c reflect.Value) (err error) { - err = errors.New("") - - // Make sure it is legal to call IsNil. - switch c.Kind() { - case reflect.Invalid: - case reflect.Chan: - case reflect.Func: - case reflect.Interface: - case reflect.Map: - case reflect.Ptr: - case reflect.Slice: - - default: - err = NewFatalError("which cannot be compared to nil") - return - } - - // Ask whether the value is nil. Handle a nil literal (kind Invalid) - // specially, since it's not legal to call IsNil there. 
- if c.Kind() == reflect.Invalid || c.IsNil() { - err = nil - } - return -} - -//////////////////////////////////////////////////////////////////////// -// Public implementation -//////////////////////////////////////////////////////////////////////// - -func (m *equalsMatcher) Matches(candidate interface{}) error { - e := m.expectedValue - c := reflect.ValueOf(candidate) - ek := e.Kind() - - switch { - case ek == reflect.Bool: - return checkAgainstBool(e.Bool(), c) - - case isSignedInteger(e): - return checkAgainstInt64(e.Int(), c) - - case isUnsignedInteger(e): - return checkAgainstUint64(e.Uint(), c) - - case ek == reflect.Float32: - return checkAgainstFloat32(float32(e.Float()), c) - - case ek == reflect.Float64: - return checkAgainstFloat64(e.Float(), c) - - case ek == reflect.Complex64: - return checkAgainstComplex64(complex64(e.Complex()), c) - - case ek == reflect.Complex128: - return checkAgainstComplex128(complex128(e.Complex()), c) - - case ek == reflect.Chan: - return checkAgainstChan(e, c) - - case ek == reflect.Func: - return checkAgainstFunc(e, c) - - case ek == reflect.Map: - return checkAgainstMap(e, c) - - case ek == reflect.Ptr: - return checkAgainstPtr(e, c) - - case ek == reflect.Slice: - return checkAgainstSlice(e, c) - - case ek == reflect.String: - return checkAgainstString(e, c) - - case ek == reflect.Array: - return checkAgainstArray(e, c) - - case ek == reflect.UnsafePointer: - return checkAgainstUnsafePointer(e, c) - - case ek == reflect.Invalid: - return checkForNil(c) - } - - panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek)) -} - -func (m *equalsMatcher) Description() string { - // Special case: handle nil. - if !m.expectedValue.IsValid() { - return "is nil" - } - - return fmt.Sprintf("%v", m.expectedValue.Interface()) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/error.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/error.go deleted file mode 100755 index 52265ee..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/error.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -// Error returns a matcher that matches non-nil values implementing the -// built-in error interface for whom the return value of Error() matches the -// supplied matcher. -// -// For example: -// -// err := errors.New("taco burrito") -// -// Error(Equals("taco burrito")) // matches err -// Error(HasSubstr("taco")) // matches err -// Error(HasSubstr("enchilada")) // doesn't match err -// -func Error(m Matcher) Matcher { - return &errorMatcher{m} -} - -type errorMatcher struct { - wrappedMatcher Matcher -} - -func (m *errorMatcher) Description() string { - return "error " + m.wrappedMatcher.Description() -} - -func (m *errorMatcher) Matches(c interface{}) error { - // Make sure that c is an error. 
- e, ok := c.(error) - if !ok { - return NewFatalError("which is not an error") - } - - // Pass on the error text to the wrapped matcher. - return m.wrappedMatcher.Matches(e.Error()) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go deleted file mode 100755 index b6811d0..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "fmt" - "reflect" -) - -// GreaterOrEqual returns a matcher that matches integer, floating point, or -// strings values v such that v >= x. Comparison is not defined between numeric -// and string types, but is defined between all integer and floating point -// types. -// -// x must itself be an integer, floating point, or string type; otherwise, -// GreaterOrEqual will panic. -func GreaterOrEqual(x interface{}) Matcher { - desc := fmt.Sprintf("greater than or equal to %v", x) - - // Special case: make it clear that strings are strings. - if reflect.TypeOf(x).Kind() == reflect.String { - desc = fmt.Sprintf("greater than or equal to \"%s\"", x) - } - - return transformDescription(Not(LessThan(x)), desc) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go deleted file mode 100755 index 74d9db9..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "fmt" - "reflect" -) - -// GreaterThan returns a matcher that matches integer, floating point, or -// strings values v such that v > x. Comparison is not defined between numeric -// and string types, but is defined between all integer and floating point -// types. -// -// x must itself be an integer, floating point, or string type; otherwise, -// GreaterThan will panic. -func GreaterThan(x interface{}) Matcher { - desc := fmt.Sprintf("greater than %v", x) - - // Special case: make it clear that strings are strings. 
- if reflect.TypeOf(x).Kind() == reflect.String { - desc = fmt.Sprintf("greater than \"%s\"", x) - } - - return transformDescription(Not(LessOrEqual(x)), desc) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go deleted file mode 100755 index e9b4618..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "fmt" - "reflect" -) - -// HasSameTypeAs returns a matcher that matches values with exactly the same -// type as the supplied prototype. -func HasSameTypeAs(p interface{}) Matcher { - expected := reflect.TypeOf(p) - pred := func(c interface{}) error { - actual := reflect.TypeOf(c) - if actual != expected { - return fmt.Errorf("which has type %v", actual) - } - - return nil - } - - return NewMatcher(pred, fmt.Sprintf("has type %v", expected)) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go deleted file mode 100755 index 3b2f218..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -// HasSubstr returns a matcher that matches strings containing s as a -// substring. -func HasSubstr(s string) Matcher { - return NewMatcher( - func(c interface{}) error { return hasSubstr(s, c) }, - fmt.Sprintf("has substring \"%s\"", s)) -} - -func hasSubstr(needle string, c interface{}) error { - v := reflect.ValueOf(c) - if v.Kind() != reflect.String { - return NewFatalError("which is not a string") - } - - // Perform the substring search. 
- haystack := v.String() - if strings.Contains(haystack, needle) { - return nil - } - - return errors.New("") -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go deleted file mode 100755 index 5b8cb16..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2012 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" -) - -// Is the type comparable according to the definition here? -// -// http://weekly.golang.org/doc/go_spec.html#Comparison_operators -// -func isComparable(t reflect.Type) bool { - switch t.Kind() { - case reflect.Array: - return isComparable(t.Elem()) - - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if !isComparable(t.Field(i).Type) { - return false - } - } - - return true - - case reflect.Slice, reflect.Map, reflect.Func: - return false - } - - return true -} - -// Should the supplied type be allowed as an argument to IdenticalTo? -func isLegalForIdenticalTo(t reflect.Type) (bool, error) { - // Allow the zero type. - if t == nil { - return true, nil - } - - // Reference types are always okay; we compare pointers. - switch t.Kind() { - case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan: - return true, nil - } - - // Reject other non-comparable types. - if !isComparable(t) { - return false, errors.New(fmt.Sprintf("%v is not comparable", t)) - } - - return true, nil -} - -// IdenticalTo(x) returns a matcher that matches values v with type identical -// to x such that: -// -// 1. If v and x are of a reference type (slice, map, function, channel), then -// they are either both nil or are references to the same object. -// -// 2. Otherwise, if v and x are not of a reference type but have a valid type, -// then v == x. -// -// If v and x are both the invalid type (which results from the predeclared nil -// value, or from nil interface variables), then the matcher is satisfied. -// -// This function will panic if x is of a value type that is not comparable. For -// example, x cannot be an array of functions. -func IdenticalTo(x interface{}) Matcher { - t := reflect.TypeOf(x) - - // Reject illegal arguments. - if ok, err := isLegalForIdenticalTo(t); !ok { - panic("IdenticalTo: " + err.Error()) - } - - return &identicalToMatcher{x} -} - -type identicalToMatcher struct { - x interface{} -} - -func (m *identicalToMatcher) Description() string { - t := reflect.TypeOf(m.x) - return fmt.Sprintf("identical to <%v> %v", t, m.x) -} - -func (m *identicalToMatcher) Matches(c interface{}) error { - // Make sure the candidate's type is correct. 
- t := reflect.TypeOf(m.x) - if ct := reflect.TypeOf(c); t != ct { - return NewFatalError(fmt.Sprintf("which is of type %v", ct)) - } - - // Special case: two values of the invalid type are always identical. - if t == nil { - return nil - } - - // Handle reference types. - switch t.Kind() { - case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan: - xv := reflect.ValueOf(m.x) - cv := reflect.ValueOf(c) - if xv.Pointer() == cv.Pointer() { - return nil - } - - return errors.New("which is not an identical reference") - } - - // Are the values equal? - if m.x == c { - return nil - } - - return errors.New("") -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go deleted file mode 100755 index 63ffd3a..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "fmt" - "reflect" -) - -// LessOrEqual returns a matcher that matches integer, floating point, or -// strings values v such that v <= x. Comparison is not defined between numeric -// and string types, but is defined between all integer and floating point -// types. -// -// x must itself be an integer, floating point, or string type; otherwise, -// LessOrEqual will panic. -func LessOrEqual(x interface{}) Matcher { - desc := fmt.Sprintf("less than or equal to %v", x) - - // Special case: make it clear that strings are strings. - if reflect.TypeOf(x).Kind() == reflect.String { - desc = fmt.Sprintf("less than or equal to \"%s\"", x) - } - - // Put LessThan last so that its error messages will be used in the event of - // failure. - return transformDescription(AnyOf(Equals(x), LessThan(x)), desc) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go deleted file mode 100755 index e90a430..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package oglematchers - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -// LessThan returns a matcher that matches integer, floating point, or strings -// values v such that v < x. Comparison is not defined between numeric and -// string types, but is defined between all integer and floating point types. -// -// x must itself be an integer, floating point, or string type; otherwise, -// LessThan will panic. -func LessThan(x interface{}) Matcher { - v := reflect.ValueOf(x) - kind := v.Kind() - - switch { - case isInteger(v): - case isFloat(v): - case kind == reflect.String: - - default: - panic(fmt.Sprintf("LessThan: unexpected kind %v", kind)) - } - - return &lessThanMatcher{v} -} - -type lessThanMatcher struct { - limit reflect.Value -} - -func (m *lessThanMatcher) Description() string { - // Special case: make it clear that strings are strings. - if m.limit.Kind() == reflect.String { - return fmt.Sprintf("less than \"%s\"", m.limit.String()) - } - - return fmt.Sprintf("less than %v", m.limit.Interface()) -} - -func compareIntegers(v1, v2 reflect.Value) (err error) { - err = errors.New("") - - switch { - case isSignedInteger(v1) && isSignedInteger(v2): - if v1.Int() < v2.Int() { - err = nil - } - return - - case isSignedInteger(v1) && isUnsignedInteger(v2): - if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() { - err = nil - } - return - - case isUnsignedInteger(v1) && isSignedInteger(v2): - if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() { - err = nil - } - return - - case isUnsignedInteger(v1) && isUnsignedInteger(v2): - if v1.Uint() < v2.Uint() { - err = nil - } - return - } - - panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2)) -} - -func getFloat(v reflect.Value) float64 { - switch { - case isSignedInteger(v): - return float64(v.Int()) - - case isUnsignedInteger(v): - return float64(v.Uint()) - - case isFloat(v): - return v.Float() - } - - panic(fmt.Sprintf("getFloat: %v", v)) -} - -func (m *lessThanMatcher) Matches(c interface{}) (err error) { - v1 := reflect.ValueOf(c) - v2 := m.limit - - err = errors.New("") - - // Handle strings as a special case. - if v1.Kind() == reflect.String && v2.Kind() == reflect.String { - if v1.String() < v2.String() { - err = nil - } - return - } - - // If we get here, we require that we are dealing with integers or floats. - v1Legal := isInteger(v1) || isFloat(v1) - v2Legal := isInteger(v2) || isFloat(v2) - if !v1Legal || !v2Legal { - err = NewFatalError("which is not comparable") - return - } - - // Handle the various comparison cases. - switch { - // Both integers - case isInteger(v1) && isInteger(v2): - return compareIntegers(v1, v2) - - // At least one float32 - case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32: - if float32(getFloat(v1)) < float32(getFloat(v2)) { - err = nil - } - return - - // At least one float64 - case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64: - if getFloat(v1) < getFloat(v2) { - err = nil - } - return - } - - // We shouldn't get here. - panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2)) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go deleted file mode 100755 index dc750c5..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. 
-// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package oglematchers provides a set of matchers useful in a testing or -// mocking framework. These matchers are inspired by and mostly compatible with -// Google Test for C++ and Google JS Test. -// -// This package is used by github.com/smartystreets/assertions/internal/ogletest and -// github.com/smartystreets/assertions/internal/oglemock, which may be more directly useful if you're not -// writing your own testing package or defining your own matchers. -package oglematchers - -// A Matcher is some predicate implicitly defining a set of values that it -// matches. For example, GreaterThan(17) matches all numeric values greater -// than 17, and HasSubstr("taco") matches all strings with the substring -// "taco". -// -// Matchers are typically exposed to tests via constructor functions like -// HasSubstr. In order to implement such a function you can either define your -// own matcher type or use NewMatcher. -type Matcher interface { - // Check whether the supplied value belongs to the the set defined by the - // matcher. Return a non-nil error if and only if it does not. - // - // The error describes why the value doesn't match. The error text is a - // relative clause that is suitable for being placed after the value. For - // example, a predicate that matches strings with a particular substring may, - // when presented with a numerical value, return the following error text: - // - // "which is not a string" - // - // Then the failure message may look like: - // - // Expected: has substring "taco" - // Actual: 17, which is not a string - // - // If the error is self-apparent based on the description of the matcher, the - // error text may be empty (but the error still non-nil). For example: - // - // Expected: 17 - // Actual: 19 - // - // If you are implementing a new matcher, see also the documentation on - // FatalError. - Matches(candidate interface{}) error - - // Description returns a string describing the property that values matching - // this matcher have, as a verb phrase where the subject is the value. For - // example, "is greather than 17" or "has substring "taco"". - Description() string -} - -// FatalError is an implementation of the error interface that may be returned -// from matchers, indicating the error should be propagated. Returning a -// *FatalError indicates that the matcher doesn't process values of the -// supplied type, or otherwise doesn't know how to handle the value. -// -// For example, if GreaterThan(17) returned false for the value "taco" without -// a fatal error, then Not(GreaterThan(17)) would return true. This is -// technically correct, but is surprising and may mask failures where the wrong -// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a -// fatal error, which will be propagated by Not(). 
-type FatalError struct { - errorText string -} - -// NewFatalError creates a FatalError struct with the supplied error text. -func NewFatalError(s string) *FatalError { - return &FatalError{s} -} - -func (e *FatalError) Error() string { - return e.errorText -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go deleted file mode 100755 index ea174dc..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" - "regexp" -) - -// MatchesRegexp returns a matcher that matches strings and byte slices whose -// contents match the supplied regular expression. The semantics are those of -// regexp.Match. In particular, that means the match is not implicitly anchored -// to the ends of the string: MatchesRegexp("bar") will match "foo bar baz". -func MatchesRegexp(pattern string) Matcher { - re, err := regexp.Compile(pattern) - if err != nil { - panic("MatchesRegexp: " + err.Error()) - } - - return &matchesRegexpMatcher{re} -} - -type matchesRegexpMatcher struct { - re *regexp.Regexp -} - -func (m *matchesRegexpMatcher) Description() string { - return fmt.Sprintf("matches regexp \"%s\"", m.re.String()) -} - -func (m *matchesRegexpMatcher) Matches(c interface{}) (err error) { - v := reflect.ValueOf(c) - isString := v.Kind() == reflect.String - isByteSlice := v.Kind() == reflect.Slice && v.Elem().Kind() == reflect.Uint8 - - err = errors.New("") - - switch { - case isString: - if m.re.MatchString(v.String()) { - err = nil - } - - case isByteSlice: - if m.re.Match(v.Bytes()) { - err = nil - } - - default: - err = NewFatalError("which is not a string or []byte") - } - - return -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go deleted file mode 100755 index e49555d..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package oglematchers - -// Create a matcher with the given description and predicate function, which -// will be invoked to handle calls to Matchers. -// -// Using this constructor may be a convenience over defining your own type that -// implements Matcher if you do not need any logic in your Description method. -func NewMatcher( - predicate func(interface{}) error, - description string) Matcher { - return &predicateMatcher{ - predicate: predicate, - description: description, - } -} - -type predicateMatcher struct { - predicate func(interface{}) error - description string -} - -func (pm *predicateMatcher) Matches(c interface{}) error { - return pm.predicate(c) -} - -func (pm *predicateMatcher) Description() string { - return pm.description -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go deleted file mode 100755 index 097c3ca..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" -) - -// Not returns a matcher that inverts the set of values matched by the wrapped -// matcher. It does not transform the result for values for which the wrapped -// matcher returns a fatal error. -func Not(m Matcher) Matcher { - return ¬Matcher{m} -} - -type notMatcher struct { - wrapped Matcher -} - -func (m *notMatcher) Matches(c interface{}) (err error) { - err = m.wrapped.Matches(c) - - // Did the wrapped matcher say yes? - if err == nil { - return errors.New("") - } - - // Did the wrapped matcher return a fatal error? - if _, isFatal := err.(*FatalError); isFatal { - return err - } - - // The wrapped matcher returned a non-fatal error. - return nil -} - -func (m *notMatcher) Description() string { - return fmt.Sprintf("not(%s)", m.wrapped.Description()) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics.go deleted file mode 100755 index be07b5e..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" -) - -// Panics matches zero-arg functions which, when invoked, panic with an error -// that matches the supplied matcher. -// -// NOTE(jacobsa): This matcher cannot detect the case where the function panics -// using panic(nil), by design of the language. See here for more info: -// -// http://goo.gl/9aIQL -// -func Panics(m Matcher) Matcher { - return &panicsMatcher{m} -} - -type panicsMatcher struct { - wrappedMatcher Matcher -} - -func (m *panicsMatcher) Description() string { - return "panics with: " + m.wrappedMatcher.Description() -} - -func (m *panicsMatcher) Matches(c interface{}) (err error) { - // Make sure c is a zero-arg function. - v := reflect.ValueOf(c) - if v.Kind() != reflect.Func || v.Type().NumIn() != 0 { - err = NewFatalError("which is not a zero-arg function") - return - } - - // Call the function and check its panic error. - defer func() { - if e := recover(); e != nil { - err = m.wrappedMatcher.Matches(e) - - // Set a clearer error message if the matcher said no. - if err != nil { - wrappedClause := "" - if err.Error() != "" { - wrappedClause = ", " + err.Error() - } - - err = errors.New(fmt.Sprintf("which panicked with: %v%s", e, wrappedClause)) - } - } - }() - - v.Call([]reflect.Value{}) - - // If we get here, the function didn't panic. - err = errors.New("which didn't panic") - return -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee.go deleted file mode 100755 index adf6a35..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2012 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -import ( - "errors" - "fmt" - "reflect" -) - -// Return a matcher that matches non-nil pointers whose pointee matches the -// wrapped matcher. -func Pointee(m Matcher) Matcher { - return &pointeeMatcher{m} -} - -type pointeeMatcher struct { - wrapped Matcher -} - -func (m *pointeeMatcher) Matches(c interface{}) (err error) { - // Make sure the candidate is of the appropriate type. - cv := reflect.ValueOf(c) - if !cv.IsValid() || cv.Kind() != reflect.Ptr { - return NewFatalError("which is not a pointer") - } - - // Make sure the candidate is non-nil. - if cv.IsNil() { - return NewFatalError("") - } - - // Defer to the wrapped matcher. Fix up empty errors so that failure messages - // are more helpful than just printing a pointer for "Actual". 
- pointee := cv.Elem().Interface() - err = m.wrapped.Matches(pointee) - if err != nil && err.Error() == "" { - s := fmt.Sprintf("whose pointee is %v", pointee) - - if _, ok := err.(*FatalError); ok { - err = NewFatalError(s) - } else { - err = errors.New(s) - } - } - - return err -} - -func (m *pointeeMatcher) Description() string { - return fmt.Sprintf("pointee(%s)", m.wrapped.Description()) -} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go deleted file mode 100755 index 1eef126..0000000 --- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2011 Aaron Jacobs. All Rights Reserved. -// Author: aaronjjacobs@gmail.com (Aaron Jacobs) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oglematchers - -// transformDescription returns a matcher that is equivalent to the supplied -// one, except that it has the supplied description instead of the one attached -// to the existing matcher. -func transformDescription(m Matcher, newDesc string) Matcher { - return &transformDescriptionMatcher{newDesc, m} -} - -type transformDescriptionMatcher struct { - desc string - wrappedMatcher Matcher -} - -func (m *transformDescriptionMatcher) Description() string { - return m.desc -} - -func (m *transformDescriptionMatcher) Matches(c interface{}) error { - return m.wrappedMatcher.Matches(c) -} diff --git a/vendor/github.com/smartystreets/assertions/messages.go b/vendor/github.com/smartystreets/assertions/messages.go deleted file mode 100755 index 2fbf8c6..0000000 --- a/vendor/github.com/smartystreets/assertions/messages.go +++ /dev/null @@ -1,93 +0,0 @@ -package assertions - -const ( // equality - shouldHaveBeenEqual = "Expected: '%v'\nActual: '%v'\n(Should be equal)" - shouldNotHaveBeenEqual = "Expected '%v'\nto NOT equal '%v'\n(but it did)!" - shouldHaveBeenEqualTypeMismatch = "Expected: '%v' (%T)\nActual: '%v' (%T)\n(Should be equal, type mismatch)" - shouldHaveBeenAlmostEqual = "Expected '%v' to almost equal '%v' (but it didn't)!" - shouldHaveNotBeenAlmostEqual = "Expected '%v' to NOT almost equal '%v' (but it did)!" - shouldHaveResembled = "Expected: '%s'\nActual: '%s'\n(Should resemble)!" - shouldNotHaveResembled = "Expected '%#v'\nto NOT resemble '%#v'\n(but it did)!" - shouldBePointers = "Both arguments should be pointers " - shouldHaveBeenNonNilPointer = shouldBePointers + "(the %s was %s)!" - shouldHavePointedTo = "Expected '%+v' (address: '%v') and '%+v' (address: '%v') to be the same address (but their weren't)!" - shouldNotHavePointedTo = "Expected '%+v' and '%+v' to be different references (but they matched: '%v')!" - shouldHaveBeenNil = "Expected: nil\nActual: '%v'" - shouldNotHaveBeenNil = "Expected '%+v' to NOT be nil (but it was)!" 
- shouldHaveBeenTrue = "Expected: true\nActual: %v" - shouldHaveBeenFalse = "Expected: false\nActual: %v" - shouldHaveBeenZeroValue = "'%+v' should have been the zero value" //"Expected: (zero value)\nActual: %v" -) - -const ( // quantity comparisons - shouldHaveBeenGreater = "Expected '%v' to be greater than '%v' (but it wasn't)!" - shouldHaveBeenGreaterOrEqual = "Expected '%v' to be greater than or equal to '%v' (but it wasn't)!" - shouldHaveBeenLess = "Expected '%v' to be less than '%v' (but it wasn't)!" - shouldHaveBeenLessOrEqual = "Expected '%v' to be less than or equal to '%v' (but it wasn't)!" - shouldHaveBeenBetween = "Expected '%v' to be between '%v' and '%v' (but it wasn't)!" - shouldNotHaveBeenBetween = "Expected '%v' NOT to be between '%v' and '%v' (but it was)!" - shouldHaveDifferentUpperAndLower = "The lower and upper bounds must be different values (they were both '%v')." - shouldHaveBeenBetweenOrEqual = "Expected '%v' to be between '%v' and '%v' or equal to one of them (but it wasn't)!" - shouldNotHaveBeenBetweenOrEqual = "Expected '%v' NOT to be between '%v' and '%v' or equal to one of them (but it was)!" -) - -const ( // collections - shouldHaveContained = "Expected the container (%v) to contain: '%v' (but it didn't)!" - shouldNotHaveContained = "Expected the container (%v) NOT to contain: '%v' (but it did)!" - shouldHaveContainedKey = "Expected the %v to contain the key: %v (but it didn't)!" - shouldNotHaveContainedKey = "Expected the %v NOT to contain the key: %v (but it did)!" - shouldHaveBeenIn = "Expected '%v' to be in the container (%v), but it wasn't!" - shouldNotHaveBeenIn = "Expected '%v' NOT to be in the container (%v), but it was!" - shouldHaveBeenAValidCollection = "You must provide a valid container (was %v)!" - shouldHaveBeenAValidMap = "You must provide a valid map type (was %v)!" - shouldHaveBeenEmpty = "Expected %+v to be empty (but it wasn't)!" - shouldNotHaveBeenEmpty = "Expected %+v to NOT be empty (but it was)!" - shouldHaveBeenAValidInteger = "You must provide a valid integer (was %v)!" - shouldHaveBeenAValidLength = "You must provide a valid positive integer (was %v)!" - shouldHaveHadLength = "Expected %+v (length: %v) to have length equal to '%v', but it wasn't!" -) - -const ( // strings - shouldHaveStartedWith = "Expected '%v'\nto start with '%v'\n(but it didn't)!" - shouldNotHaveStartedWith = "Expected '%v'\nNOT to start with '%v'\n(but it did)!" - shouldHaveEndedWith = "Expected '%v'\nto end with '%v'\n(but it didn't)!" - shouldNotHaveEndedWith = "Expected '%v'\nNOT to end with '%v'\n(but it did)!" - shouldAllBeStrings = "All arguments to this assertion must be strings (you provided: %v)." - shouldBothBeStrings = "Both arguments to this assertion must be strings (you provided %v and %v)." - shouldBeString = "The argument to this assertion must be a string (you provided %v)." - shouldHaveContainedSubstring = "Expected '%s' to contain substring '%s' (but it didn't)!" - shouldNotHaveContainedSubstring = "Expected '%s' NOT to contain substring '%s' (but it did)!" - shouldHaveBeenBlank = "Expected '%s' to be blank (but it wasn't)!" - shouldNotHaveBeenBlank = "Expected value to NOT be blank (but it was)!" -) - -const ( // panics - shouldUseVoidNiladicFunction = "You must provide a void, niladic function as the first argument!" - shouldHavePanickedWith = "Expected func() to panic with '%v' (but it panicked with '%v')!" - shouldHavePanicked = "Expected func() to panic (but it didn't)!" 
- shouldNotHavePanicked = "Expected func() NOT to panic (error: '%+v')!" - shouldNotHavePanickedWith = "Expected func() NOT to panic with '%v' (but it did)!" -) - -const ( // type checking - shouldHaveBeenA = "Expected '%v' to be: '%v' (but was: '%v')!" - shouldNotHaveBeenA = "Expected '%v' to NOT be: '%v' (but it was)!" - - shouldHaveImplemented = "Expected: '%v interface support'\nActual: '%v' does not implement the interface!" - shouldNotHaveImplemented = "Expected '%v'\nto NOT implement '%v'\n(but it did)!" - shouldCompareWithInterfacePointer = "The expected value must be a pointer to an interface type (eg. *fmt.Stringer)" - shouldNotBeNilActual = "The actual value was 'nil' and should be a value or a pointer to a value!" -) - -const ( // time comparisons - shouldUseTimes = "You must provide time instances as arguments to this assertion." - shouldUseTimeSlice = "You must provide a slice of time instances as the first argument to this assertion." - shouldUseDurationAndTime = "You must provide a duration and a time as arguments to this assertion." - shouldHaveHappenedBefore = "Expected '%v' to happen before '%v' (it happened '%v' after)!" - shouldHaveHappenedAfter = "Expected '%v' to happen after '%v' (it happened '%v' before)!" - shouldHaveHappenedBetween = "Expected '%v' to happen between '%v' and '%v' (it happened '%v' outside threshold)!" - shouldNotHaveHappenedOnOrBetween = "Expected '%v' to NOT happen on or between '%v' and '%v' (but it did)!" - - // format params: incorrect-index, previous-index, previous-time, incorrect-index, incorrect-time - shouldHaveBeenChronological = "The 'Time' at index [%d] should have happened after the previous one (but it didn't!):\n [%d]: %s\n [%d]: %s (see, it happened before!)" -) diff --git a/vendor/github.com/smartystreets/assertions/panic.go b/vendor/github.com/smartystreets/assertions/panic.go deleted file mode 100755 index 21f92dc..0000000 --- a/vendor/github.com/smartystreets/assertions/panic.go +++ /dev/null @@ -1,115 +0,0 @@ -package assertions - -import "fmt" - -// ShouldPanic receives a void, niladic function and expects to recover a panic. -func ShouldPanic(actual interface{}, expected ...interface{}) (message string) { - if fail := need(0, expected); fail != success { - return fail - } - - action, _ := actual.(func()) - - if action == nil { - message = shouldUseVoidNiladicFunction - return - } - - defer func() { - recovered := recover() - if recovered == nil { - message = shouldHavePanicked - } else { - message = success - } - }() - action() - - return -} - -// ShouldNotPanic receives a void, niladic function and expects to execute the function without any panic. -func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) { - if fail := need(0, expected); fail != success { - return fail - } - - action, _ := actual.(func()) - - if action == nil { - message = shouldUseVoidNiladicFunction - return - } - - defer func() { - recovered := recover() - if recovered != nil { - message = fmt.Sprintf(shouldNotHavePanicked, recovered) - } else { - message = success - } - }() - action() - - return -} - -// ShouldPanicWith receives a void, niladic function and expects to recover a panic with the second argument as the content. 
-func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) { - if fail := need(1, expected); fail != success { - return fail - } - - action, _ := actual.(func()) - - if action == nil { - message = shouldUseVoidNiladicFunction - return - } - - defer func() { - recovered := recover() - if recovered == nil { - message = shouldHavePanicked - } else { - if equal := ShouldEqual(recovered, expected[0]); equal != success { - message = serializer.serialize(expected[0], recovered, fmt.Sprintf(shouldHavePanickedWith, expected[0], recovered)) - } else { - message = success - } - } - }() - action() - - return -} - -// ShouldNotPanicWith receives a void, niladic function and expects to recover a panic whose content differs from the second argument. -func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) { - if fail := need(1, expected); fail != success { - return fail - } - - action, _ := actual.(func()) - - if action == nil { - message = shouldUseVoidNiladicFunction - return - } - - defer func() { - recovered := recover() - if recovered == nil { - message = success - } else { - if equal := ShouldEqual(recovered, expected[0]); equal == success { - message = fmt.Sprintf(shouldNotHavePanickedWith, expected[0]) - } else { - message = success - } - } - }() - action() - - return -} diff --git a/vendor/github.com/smartystreets/assertions/quantity.go b/vendor/github.com/smartystreets/assertions/quantity.go deleted file mode 100755 index 188379f..0000000 --- a/vendor/github.com/smartystreets/assertions/quantity.go +++ /dev/null @@ -1,141 +0,0 @@ -package assertions - -import ( - "fmt" - - "github.com/smartystreets/assertions/internal/oglematchers" -) - -// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second. -func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil { - return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0]) - } - return success -} - -// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second. -func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } else if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil { - return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0]) - } - return success -} - -// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second. -func ShouldBeLessThan(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } else if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil { - return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0]) - } - return success -} - -// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than or equal to the second. 
-func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } else if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil { - return fmt.Sprintf(shouldHaveBeenLessOrEqual, actual, expected[0]) - } - return success -} - -// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. -// It ensures that the actual value is between both bounds (but not equal to either of them). -func ShouldBeBetween(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - lower, upper, fail := deriveBounds(expected) - - if fail != success { - return fail - } else if !isBetween(actual, lower, upper) { - return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper) - } - return success -} - -// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. -// It ensures that the actual value is NOT between both bounds. -func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - lower, upper, fail := deriveBounds(expected) - - if fail != success { - return fail - } else if isBetween(actual, lower, upper) { - return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper) - } - return success -} -func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) { - lower = values[0] - upper = values[1] - - if ShouldNotEqual(lower, upper) != success { - return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower) - } else if ShouldBeLessThan(lower, upper) != success { - lower, upper = upper, lower - } - return lower, upper, success -} -func isBetween(value, lower, upper interface{}) bool { - if ShouldBeGreaterThan(value, lower) != success { - return false - } else if ShouldBeLessThan(value, upper) != success { - return false - } - return true -} - -// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. -// It ensures that the actual value is between both bounds or equal to one of them. -func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - lower, upper, fail := deriveBounds(expected) - - if fail != success { - return fail - } else if !isBetweenOrEqual(actual, lower, upper) { - return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper) - } - return success -} - -// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. -// It ensures that the actual value is nopt between the bounds nor equal to either of them. 
-func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - lower, upper, fail := deriveBounds(expected) - - if fail != success { - return fail - } else if isBetweenOrEqual(actual, lower, upper) { - return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper) - } - return success -} - -func isBetweenOrEqual(value, lower, upper interface{}) bool { - if ShouldBeGreaterThanOrEqualTo(value, lower) != success { - return false - } else if ShouldBeLessThanOrEqualTo(value, upper) != success { - return false - } - return true -} diff --git a/vendor/github.com/smartystreets/assertions/serializer.go b/vendor/github.com/smartystreets/assertions/serializer.go deleted file mode 100755 index caf7742..0000000 --- a/vendor/github.com/smartystreets/assertions/serializer.go +++ /dev/null @@ -1,69 +0,0 @@ -package assertions - -import ( - "encoding/json" - "fmt" - - "github.com/smartystreets/assertions/internal/go-render/render" -) - -type Serializer interface { - serialize(expected, actual interface{}, message string) string - serializeDetailed(expected, actual interface{}, message string) string -} - -type failureSerializer struct{} - -func (self *failureSerializer) serializeDetailed(expected, actual interface{}, message string) string { - view := FailureView{ - Message: message, - Expected: render.Render(expected), - Actual: render.Render(actual), - } - serialized, err := json.Marshal(view) - if err != nil { - return message - } - return string(serialized) -} - -func (self *failureSerializer) serialize(expected, actual interface{}, message string) string { - view := FailureView{ - Message: message, - Expected: fmt.Sprintf("%+v", expected), - Actual: fmt.Sprintf("%+v", actual), - } - serialized, err := json.Marshal(view) - if err != nil { - return message - } - return string(serialized) -} - -func newSerializer() *failureSerializer { - return &failureSerializer{} -} - -/////////////////////////////////////////////////////////////////////////////// - -// This struct is also declared in github.com/smartystreets/goconvey/convey/reporting. -// The json struct tags should be equal in both declarations. -type FailureView struct { - Message string `json:"Message"` - Expected string `json:"Expected"` - Actual string `json:"Actual"` -} - -/////////////////////////////////////////////////////// - -// noopSerializer just gives back the original message. This is useful when we are using -// the assertions from a context other than the web UI, that requires the JSON structure -// provided by the failureSerializer. -type noopSerializer struct{} - -func (self *noopSerializer) serialize(expected, actual interface{}, message string) string { - return message -} -func (self *noopSerializer) serializeDetailed(expected, actual interface{}, message string) string { - return message -} diff --git a/vendor/github.com/smartystreets/assertions/strings.go b/vendor/github.com/smartystreets/assertions/strings.go deleted file mode 100755 index f4717ec..0000000 --- a/vendor/github.com/smartystreets/assertions/strings.go +++ /dev/null @@ -1,227 +0,0 @@ -package assertions - -import ( - "fmt" - "reflect" - "strings" -) - -// ShouldStartWith receives exactly 2 string parameters and ensures that the first starts with the second. 
-func ShouldStartWith(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - value, valueIsString := actual.(string) - prefix, prefixIsString := expected[0].(string) - - if !valueIsString || !prefixIsString { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - return shouldStartWith(value, prefix) -} -func shouldStartWith(value, prefix string) string { - if !strings.HasPrefix(value, prefix) { - shortval := value - if len(shortval) > len(prefix) { - shortval = shortval[:len(prefix)] + "..." - } - return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix)) - } - return success -} - -// ShouldNotStartWith receives exactly 2 string parameters and ensures that the first does not start with the second. -func ShouldNotStartWith(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - value, valueIsString := actual.(string) - prefix, prefixIsString := expected[0].(string) - - if !valueIsString || !prefixIsString { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - return shouldNotStartWith(value, prefix) -} -func shouldNotStartWith(value, prefix string) string { - if strings.HasPrefix(value, prefix) { - if value == "" { - value = "" - } - if prefix == "" { - prefix = "" - } - return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix) - } - return success -} - -// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second. -func ShouldEndWith(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - value, valueIsString := actual.(string) - suffix, suffixIsString := expected[0].(string) - - if !valueIsString || !suffixIsString { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - return shouldEndWith(value, suffix) -} -func shouldEndWith(value, suffix string) string { - if !strings.HasSuffix(value, suffix) { - shortval := value - if len(shortval) > len(suffix) { - shortval = "..." + shortval[len(shortval)-len(suffix):] - } - return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix)) - } - return success -} - -// ShouldEndWith receives exactly 2 string parameters and ensures that the first does not end with the second. -func ShouldNotEndWith(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - value, valueIsString := actual.(string) - suffix, suffixIsString := expected[0].(string) - - if !valueIsString || !suffixIsString { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - return shouldNotEndWith(value, suffix) -} -func shouldNotEndWith(value, suffix string) string { - if strings.HasSuffix(value, suffix) { - if value == "" { - value = "" - } - if suffix == "" { - suffix = "" - } - return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix) - } - return success -} - -// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring. 
-func ShouldContainSubstring(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - long, longOk := actual.(string) - short, shortOk := expected[0].(string) - - if !longOk || !shortOk { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - if !strings.Contains(long, short) { - return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, long, short)) - } - return success -} - -// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring. -func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - long, longOk := actual.(string) - short, shortOk := expected[0].(string) - - if !longOk || !shortOk { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - if strings.Contains(long, short) { - return fmt.Sprintf(shouldNotHaveContainedSubstring, long, short) - } - return success -} - -// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "". -func ShouldBeBlank(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - value, ok := actual.(string) - if !ok { - return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) - } - if value != "" { - return serializer.serialize("", value, fmt.Sprintf(shouldHaveBeenBlank, value)) - } - return success -} - -// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is equal to "". -func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - value, ok := actual.(string) - if !ok { - return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) - } - if value == "" { - return shouldNotHaveBeenBlank - } - return success -} - -// ShouldEqualWithout receives exactly 3 string parameters and ensures that the first is equal to the second -// after removing all instances of the third from the first using strings.Replace(first, third, "", -1). -func ShouldEqualWithout(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualString, ok1 := actual.(string) - expectedString, ok2 := expected[0].(string) - replace, ok3 := expected[1].(string) - - if !ok1 || !ok2 || !ok3 { - return fmt.Sprintf(shouldAllBeStrings, []reflect.Type{ - reflect.TypeOf(actual), - reflect.TypeOf(expected[0]), - reflect.TypeOf(expected[1]), - }) - } - - replaced := strings.Replace(actualString, replace, "", -1) - if replaced == expectedString { - return "" - } - - return fmt.Sprintf("Expected '%s' to equal '%s' but without any '%s' (but it didn't).", actualString, expectedString, replace) -} - -// ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the first is equal to the second -// after removing all leading and trailing whitespace using strings.TrimSpace(first). 
-func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - actualString, valueIsString := actual.(string) - _, value2IsString := expected[0].(string) - - if !valueIsString || !value2IsString { - return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) - } - - actualString = strings.TrimSpace(actualString) - return ShouldEqual(actualString, expected[0]) -} diff --git a/vendor/github.com/smartystreets/assertions/time.go b/vendor/github.com/smartystreets/assertions/time.go deleted file mode 100755 index 7ba8235..0000000 --- a/vendor/github.com/smartystreets/assertions/time.go +++ /dev/null @@ -1,202 +0,0 @@ -package assertions - -import ( - "fmt" - "time" -) - -// ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the first happens before the second. -func ShouldHappenBefore(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - expectedTime, secondOk := expected[0].(time.Time) - - if !firstOk || !secondOk { - return shouldUseTimes - } - - if !actualTime.Before(expectedTime) { - return fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime)) - } - - return success -} - -// ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that the first happens on or before the second. -func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - expectedTime, secondOk := expected[0].(time.Time) - - if !firstOk || !secondOk { - return shouldUseTimes - } - - if actualTime.Equal(expectedTime) { - return success - } - return ShouldHappenBefore(actualTime, expectedTime) -} - -// ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the first happens after the second. -func ShouldHappenAfter(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - expectedTime, secondOk := expected[0].(time.Time) - - if !firstOk || !secondOk { - return shouldUseTimes - } - if !actualTime.After(expectedTime) { - return fmt.Sprintf(shouldHaveHappenedAfter, actualTime, expectedTime, expectedTime.Sub(actualTime)) - } - return success -} - -// ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that the first happens on or after the second. -func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - expectedTime, secondOk := expected[0].(time.Time) - - if !firstOk || !secondOk { - return shouldUseTimes - } - if actualTime.Equal(expectedTime) { - return success - } - return ShouldHappenAfter(actualTime, expectedTime) -} - -// ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the first happens between (not on) the second and third. 
-func ShouldHappenBetween(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - min, secondOk := expected[0].(time.Time) - max, thirdOk := expected[1].(time.Time) - - if !firstOk || !secondOk || !thirdOk { - return shouldUseTimes - } - - if !actualTime.After(min) { - return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, min.Sub(actualTime)) - } - if !actualTime.Before(max) { - return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, actualTime.Sub(max)) - } - return success -} - -// ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first happens between or on the second and third. -func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - min, secondOk := expected[0].(time.Time) - max, thirdOk := expected[1].(time.Time) - - if !firstOk || !secondOk || !thirdOk { - return shouldUseTimes - } - if actualTime.Equal(min) || actualTime.Equal(max) { - return success - } - return ShouldHappenBetween(actualTime, min, max) -} - -// ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first -// does NOT happen between or on the second or third. -func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - min, secondOk := expected[0].(time.Time) - max, thirdOk := expected[1].(time.Time) - - if !firstOk || !secondOk || !thirdOk { - return shouldUseTimes - } - if actualTime.Equal(min) || actualTime.Equal(max) { - return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) - } - if actualTime.After(min) && actualTime.Before(max) { - return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) - } - return success -} - -// ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) -// and asserts that the first time.Time happens within or on the duration specified relative to -// the other time.Time. -func ShouldHappenWithin(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - tolerance, secondOk := expected[0].(time.Duration) - threshold, thirdOk := expected[1].(time.Time) - - if !firstOk || !secondOk || !thirdOk { - return shouldUseDurationAndTime - } - - min := threshold.Add(-tolerance) - max := threshold.Add(tolerance) - return ShouldHappenOnOrBetween(actualTime, min, max) -} - -// ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) -// and asserts that the first time.Time does NOT happen within or on the duration specified relative to -// the other time.Time. 
-func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string { - if fail := need(2, expected); fail != success { - return fail - } - actualTime, firstOk := actual.(time.Time) - tolerance, secondOk := expected[0].(time.Duration) - threshold, thirdOk := expected[1].(time.Time) - - if !firstOk || !secondOk || !thirdOk { - return shouldUseDurationAndTime - } - - min := threshold.Add(-tolerance) - max := threshold.Add(tolerance) - return ShouldNotHappenOnOrBetween(actualTime, min, max) -} - -// ShouldBeChronological receives a []time.Time slice and asserts that the are -// in chronological order starting with the first time.Time as the earliest. -func ShouldBeChronological(actual interface{}, expected ...interface{}) string { - if fail := need(0, expected); fail != success { - return fail - } - - times, ok := actual.([]time.Time) - if !ok { - return shouldUseTimeSlice - } - - var previous time.Time - for i, current := range times { - if i > 0 && current.Before(previous) { - return fmt.Sprintf(shouldHaveBeenChronological, - i, i-1, previous.String(), i, current.String()) - } - previous = current - } - return "" -} diff --git a/vendor/github.com/smartystreets/assertions/type.go b/vendor/github.com/smartystreets/assertions/type.go deleted file mode 100755 index e6a3a30..0000000 --- a/vendor/github.com/smartystreets/assertions/type.go +++ /dev/null @@ -1,112 +0,0 @@ -package assertions - -import ( - "fmt" - "reflect" -) - -// ShouldHaveSameTypeAs receives exactly two parameters and compares their underlying types for equality. -func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - first := reflect.TypeOf(actual) - second := reflect.TypeOf(expected[0]) - - if equal := ShouldEqual(first, second); equal != success { - return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first)) - } - return success -} - -// ShouldNotHaveSameTypeAs receives exactly two parameters and compares their underlying types for inequality. -func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string { - if fail := need(1, expected); fail != success { - return fail - } - - first := reflect.TypeOf(actual) - second := reflect.TypeOf(expected[0]) - - if equal := ShouldEqual(first, second); equal == success { - return fmt.Sprintf(shouldNotHaveBeenA, actual, second) - } - return success -} - -// ShouldImplement receives exactly two parameters and ensures -// that the first implements the interface type of the second. 
-func ShouldImplement(actual interface{}, expectedList ...interface{}) string { - if fail := need(1, expectedList); fail != success { - return fail - } - - expected := expectedList[0] - if fail := ShouldBeNil(expected); fail != success { - return shouldCompareWithInterfacePointer - } - - if fail := ShouldNotBeNil(actual); fail != success { - return shouldNotBeNilActual - } - - var actualType reflect.Type - if reflect.TypeOf(actual).Kind() != reflect.Ptr { - actualType = reflect.PtrTo(reflect.TypeOf(actual)) - } else { - actualType = reflect.TypeOf(actual) - } - - expectedType := reflect.TypeOf(expected) - if fail := ShouldNotBeNil(expectedType); fail != success { - return shouldCompareWithInterfacePointer - } - - expectedInterface := expectedType.Elem() - - if actualType == nil { - return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actual) - } - - if !actualType.Implements(expectedInterface) { - return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actualType) - } - return success -} - -// ShouldNotImplement receives exactly two parameters and ensures -// that the first does NOT implement the interface type of the second. -func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string { - if fail := need(1, expectedList); fail != success { - return fail - } - - expected := expectedList[0] - if fail := ShouldBeNil(expected); fail != success { - return shouldCompareWithInterfacePointer - } - - if fail := ShouldNotBeNil(actual); fail != success { - return shouldNotBeNilActual - } - - var actualType reflect.Type - if reflect.TypeOf(actual).Kind() != reflect.Ptr { - actualType = reflect.PtrTo(reflect.TypeOf(actual)) - } else { - actualType = reflect.TypeOf(actual) - } - - expectedType := reflect.TypeOf(expected) - if fail := ShouldNotBeNil(expectedType); fail != success { - return shouldCompareWithInterfacePointer - } - - expectedInterface := expectedType.Elem() - - if actualType.Implements(expectedInterface) { - return fmt.Sprintf(shouldNotHaveImplemented, actualType, expectedInterface) - } - return success -} diff --git a/vendor/github.com/smartystreets/goconvey/LICENSE.md b/vendor/github.com/smartystreets/goconvey/LICENSE.md deleted file mode 100755 index d53a832..0000000 --- a/vendor/github.com/smartystreets/goconvey/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2016 SmartyStreets, LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -NOTE: Various optional and subordinate components carry their own licensing -requirements and restrictions. 
Use of those components is subject to the terms -and conditions outlined the respective license of each component. diff --git a/vendor/github.com/smartystreets/goconvey/convey/assertions.go b/vendor/github.com/smartystreets/goconvey/convey/assertions.go deleted file mode 100755 index 6a7c231..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/assertions.go +++ /dev/null @@ -1,68 +0,0 @@ -package convey - -import "github.com/smartystreets/assertions" - -var ( - ShouldEqual = assertions.ShouldEqual - ShouldNotEqual = assertions.ShouldNotEqual - ShouldAlmostEqual = assertions.ShouldAlmostEqual - ShouldNotAlmostEqual = assertions.ShouldNotAlmostEqual - ShouldResemble = assertions.ShouldResemble - ShouldNotResemble = assertions.ShouldNotResemble - ShouldPointTo = assertions.ShouldPointTo - ShouldNotPointTo = assertions.ShouldNotPointTo - ShouldBeNil = assertions.ShouldBeNil - ShouldNotBeNil = assertions.ShouldNotBeNil - ShouldBeTrue = assertions.ShouldBeTrue - ShouldBeFalse = assertions.ShouldBeFalse - ShouldBeZeroValue = assertions.ShouldBeZeroValue - - ShouldBeGreaterThan = assertions.ShouldBeGreaterThan - ShouldBeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo - ShouldBeLessThan = assertions.ShouldBeLessThan - ShouldBeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo - ShouldBeBetween = assertions.ShouldBeBetween - ShouldNotBeBetween = assertions.ShouldNotBeBetween - ShouldBeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual - ShouldNotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual - - ShouldContain = assertions.ShouldContain - ShouldNotContain = assertions.ShouldNotContain - ShouldContainKey = assertions.ShouldContainKey - ShouldNotContainKey = assertions.ShouldNotContainKey - ShouldBeIn = assertions.ShouldBeIn - ShouldNotBeIn = assertions.ShouldNotBeIn - ShouldBeEmpty = assertions.ShouldBeEmpty - ShouldNotBeEmpty = assertions.ShouldNotBeEmpty - ShouldHaveLength = assertions.ShouldHaveLength - - ShouldStartWith = assertions.ShouldStartWith - ShouldNotStartWith = assertions.ShouldNotStartWith - ShouldEndWith = assertions.ShouldEndWith - ShouldNotEndWith = assertions.ShouldNotEndWith - ShouldBeBlank = assertions.ShouldBeBlank - ShouldNotBeBlank = assertions.ShouldNotBeBlank - ShouldContainSubstring = assertions.ShouldContainSubstring - ShouldNotContainSubstring = assertions.ShouldNotContainSubstring - - ShouldPanic = assertions.ShouldPanic - ShouldNotPanic = assertions.ShouldNotPanic - ShouldPanicWith = assertions.ShouldPanicWith - ShouldNotPanicWith = assertions.ShouldNotPanicWith - - ShouldHaveSameTypeAs = assertions.ShouldHaveSameTypeAs - ShouldNotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs - ShouldImplement = assertions.ShouldImplement - ShouldNotImplement = assertions.ShouldNotImplement - - ShouldHappenBefore = assertions.ShouldHappenBefore - ShouldHappenOnOrBefore = assertions.ShouldHappenOnOrBefore - ShouldHappenAfter = assertions.ShouldHappenAfter - ShouldHappenOnOrAfter = assertions.ShouldHappenOnOrAfter - ShouldHappenBetween = assertions.ShouldHappenBetween - ShouldHappenOnOrBetween = assertions.ShouldHappenOnOrBetween - ShouldNotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween - ShouldHappenWithin = assertions.ShouldHappenWithin - ShouldNotHappenWithin = assertions.ShouldNotHappenWithin - ShouldBeChronological = assertions.ShouldBeChronological -) diff --git a/vendor/github.com/smartystreets/goconvey/convey/context.go b/vendor/github.com/smartystreets/goconvey/convey/context.go deleted file mode 100755 index 
fcea93e..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/context.go +++ /dev/null @@ -1,272 +0,0 @@ -package convey - -import ( - "fmt" - - "github.com/jtolds/gls" - "github.com/smartystreets/goconvey/convey/reporting" -) - -type conveyErr struct { - fmt string - params []interface{} -} - -func (e *conveyErr) Error() string { - return fmt.Sprintf(e.fmt, e.params...) -} - -func conveyPanic(fmt string, params ...interface{}) { - panic(&conveyErr{fmt, params}) -} - -const ( - missingGoTest = `Top-level calls to Convey(...) need a reference to the *testing.T. - Hint: Convey("description here", t, func() { /* notice that the second argument was the *testing.T (t)! */ }) ` - extraGoTest = `Only the top-level call to Convey(...) needs a reference to the *testing.T.` - noStackContext = "Convey operation made without context on goroutine stack.\n" + - "Hint: Perhaps you meant to use `Convey(..., func(c C){...})` ?" - differentConveySituations = "Different set of Convey statements on subsequent pass!\nDid not expect %#v." - multipleIdenticalConvey = "Multiple convey suites with identical names: %#v" -) - -const ( - failureHalt = "___FAILURE_HALT___" - - nodeKey = "node" -) - -///////////////////////////////// Stack Context ///////////////////////////////// - -func getCurrentContext() *context { - ctx, ok := ctxMgr.GetValue(nodeKey) - if ok { - return ctx.(*context) - } - return nil -} - -func mustGetCurrentContext() *context { - ctx := getCurrentContext() - if ctx == nil { - conveyPanic(noStackContext) - } - return ctx -} - -//////////////////////////////////// Context //////////////////////////////////// - -// context magically handles all coordination of Convey's and So assertions. -// -// It is tracked on the stack as goroutine-local-storage with the gls package, -// or explicitly if the user decides to call convey like: -// -// Convey(..., func(c C) { -// c.So(...) -// }) -// -// This implements the `C` interface. -type context struct { - reporter reporting.Reporter - - children map[string]*context - - resets []func() - - executedOnce bool - expectChildRun *bool - complete bool - - focus bool - failureMode FailureMode -} - -// rootConvey is the main entry point to a test suite. This is called when -// there's no context in the stack already, and items must contain a `t` object, -// or this panics. 
-func rootConvey(items ...interface{}) { - entry := discover(items) - - if entry.Test == nil { - conveyPanic(missingGoTest) - } - - expectChildRun := true - ctx := &context{ - reporter: buildReporter(), - - children: make(map[string]*context), - - expectChildRun: &expectChildRun, - - focus: entry.Focus, - failureMode: defaultFailureMode.combine(entry.FailMode), - } - ctxMgr.SetValues(gls.Values{nodeKey: ctx}, func() { - ctx.reporter.BeginStory(reporting.NewStoryReport(entry.Test)) - defer ctx.reporter.EndStory() - - for ctx.shouldVisit() { - ctx.conveyInner(entry.Situation, entry.Func) - expectChildRun = true - } - }) -} - -//////////////////////////////////// Methods //////////////////////////////////// - -func (ctx *context) SkipConvey(items ...interface{}) { - ctx.Convey(items, skipConvey) -} - -func (ctx *context) FocusConvey(items ...interface{}) { - ctx.Convey(items, focusConvey) -} - -func (ctx *context) Convey(items ...interface{}) { - entry := discover(items) - - // we're a branch, or leaf (on the wind) - if entry.Test != nil { - conveyPanic(extraGoTest) - } - if ctx.focus && !entry.Focus { - return - } - - var inner_ctx *context - if ctx.executedOnce { - var ok bool - inner_ctx, ok = ctx.children[entry.Situation] - if !ok { - conveyPanic(differentConveySituations, entry.Situation) - } - } else { - if _, ok := ctx.children[entry.Situation]; ok { - conveyPanic(multipleIdenticalConvey, entry.Situation) - } - inner_ctx = &context{ - reporter: ctx.reporter, - - children: make(map[string]*context), - - expectChildRun: ctx.expectChildRun, - - focus: entry.Focus, - failureMode: ctx.failureMode.combine(entry.FailMode), - } - ctx.children[entry.Situation] = inner_ctx - } - - if inner_ctx.shouldVisit() { - ctxMgr.SetValues(gls.Values{nodeKey: inner_ctx}, func() { - inner_ctx.conveyInner(entry.Situation, entry.Func) - }) - } -} - -func (ctx *context) SkipSo(stuff ...interface{}) { - ctx.assertionReport(reporting.NewSkipReport()) -} - -func (ctx *context) So(actual interface{}, assert assertion, expected ...interface{}) { - if result := assert(actual, expected...); result == assertionSuccess { - ctx.assertionReport(reporting.NewSuccessReport()) - } else { - ctx.assertionReport(reporting.NewFailureReport(result)) - } -} - -func (ctx *context) Reset(action func()) { - /* TODO: Failure mode configuration */ - ctx.resets = append(ctx.resets, action) -} - -func (ctx *context) Print(items ...interface{}) (int, error) { - fmt.Fprint(ctx.reporter, items...) - return fmt.Print(items...) -} - -func (ctx *context) Println(items ...interface{}) (int, error) { - fmt.Fprintln(ctx.reporter, items...) - return fmt.Println(items...) -} - -func (ctx *context) Printf(format string, items ...interface{}) (int, error) { - fmt.Fprintf(ctx.reporter, format, items...) - return fmt.Printf(format, items...) -} - -//////////////////////////////////// Private //////////////////////////////////// - -// shouldVisit returns true iff we should traverse down into a Convey. Note -// that just because we don't traverse a Convey this time, doesn't mean that -// we may not traverse it on a subsequent pass. -func (c *context) shouldVisit() bool { - return !c.complete && *c.expectChildRun -} - -// conveyInner is the function which actually executes the user's anonymous test -// function body. At this point, Convey or RootConvey has decided that this -// function should actually run. -func (ctx *context) conveyInner(situation string, f func(C)) { - // Record/Reset state for next time. 
- defer func() { - ctx.executedOnce = true - - // This is only needed at the leaves, but there's no harm in also setting it - // when returning from branch Convey's - *ctx.expectChildRun = false - }() - - // Set up+tear down our scope for the reporter - ctx.reporter.Enter(reporting.NewScopeReport(situation)) - defer ctx.reporter.Exit() - - // Recover from any panics in f, and assign the `complete` status for this - // node of the tree. - defer func() { - ctx.complete = true - if problem := recover(); problem != nil { - if problem, ok := problem.(*conveyErr); ok { - panic(problem) - } - if problem != failureHalt { - ctx.reporter.Report(reporting.NewErrorReport(problem)) - } - } else { - for _, child := range ctx.children { - if !child.complete { - ctx.complete = false - return - } - } - } - }() - - // Resets are registered as the `f` function executes, so nil them here. - // All resets are run in registration order (FIFO). - ctx.resets = []func(){} - defer func() { - for _, r := range ctx.resets { - // panics handled by the previous defer - r() - } - }() - - if f == nil { - // if f is nil, this was either a Convey(..., nil), or a SkipConvey - ctx.reporter.Report(reporting.NewSkipReport()) - } else { - f(ctx) - } -} - -// assertionReport is a tools for So and SkipSo which makes the report and -// then possibly panics, depending on the current context's failureMode. -func (ctx *context) assertionReport(r *reporting.AssertionResult) { - ctx.reporter.Report(r) - if r.Failure != "" && ctx.failureMode == FailureHalts { - panic(failureHalt) - } -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey b/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey deleted file mode 100755 index da7255c..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey +++ /dev/null @@ -1,4 +0,0 @@ -#ignore --timeout=1s -#-covermode=count -#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting \ No newline at end of file diff --git a/vendor/github.com/smartystreets/goconvey/convey/discovery.go b/vendor/github.com/smartystreets/goconvey/convey/discovery.go deleted file mode 100755 index 4c9a8cb..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/discovery.go +++ /dev/null @@ -1,103 +0,0 @@ -package convey - -type actionSpecifier uint8 - -const ( - noSpecifier actionSpecifier = iota - skipConvey - focusConvey -) - -type suite struct { - Situation string - Test t - Focus bool - Func func(C) // nil means skipped - FailMode FailureMode -} - -func newSuite(situation string, failureMode FailureMode, f func(C), test t, specifier actionSpecifier) *suite { - ret := &suite{ - Situation: situation, - Test: test, - Func: f, - FailMode: failureMode, - } - switch specifier { - case skipConvey: - ret.Func = nil - case focusConvey: - ret.Focus = true - } - return ret -} - -func discover(items []interface{}) *suite { - name, items := parseName(items) - test, items := parseGoTest(items) - failure, items := parseFailureMode(items) - action, items := parseAction(items) - specifier, items := parseSpecifier(items) - - if len(items) != 0 { - conveyPanic(parseError) - } - - return newSuite(name, failure, action, test, specifier) -} -func item(items []interface{}) interface{} { - if len(items) == 0 { - conveyPanic(parseError) - } - return items[0] -} -func parseName(items []interface{}) (string, []interface{}) { - if name, parsed := item(items).(string); parsed { - return name, 
items[1:] - } - conveyPanic(parseError) - panic("never get here") -} -func parseGoTest(items []interface{}) (t, []interface{}) { - if test, parsed := item(items).(t); parsed { - return test, items[1:] - } - return nil, items -} -func parseFailureMode(items []interface{}) (FailureMode, []interface{}) { - if mode, parsed := item(items).(FailureMode); parsed { - return mode, items[1:] - } - return FailureInherits, items -} -func parseAction(items []interface{}) (func(C), []interface{}) { - switch x := item(items).(type) { - case nil: - return nil, items[1:] - case func(C): - return x, items[1:] - case func(): - return func(C) { x() }, items[1:] - } - conveyPanic(parseError) - panic("never get here") -} -func parseSpecifier(items []interface{}) (actionSpecifier, []interface{}) { - if len(items) == 0 { - return noSpecifier, items - } - if spec, ok := items[0].(actionSpecifier); ok { - return spec, items[1:] - } - conveyPanic(parseError) - panic("never get here") -} - -// This interface allows us to pass the *testing.T struct -// throughout the internals of this package without ever -// having to import the "testing" package. -type t interface { - Fail() -} - -const parseError = "You must provide a name (string), then a *testing.T (if in outermost scope), an optional FailureMode, and then an action (func())." diff --git a/vendor/github.com/smartystreets/goconvey/convey/doc.go b/vendor/github.com/smartystreets/goconvey/convey/doc.go deleted file mode 100755 index ac0b984..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/doc.go +++ /dev/null @@ -1,218 +0,0 @@ -// Package convey contains all of the public-facing entry points to this project. -// This means that it should never be required of the user to import any other -// packages from this project as they serve internal purposes. -package convey - -import "github.com/smartystreets/goconvey/convey/reporting" - -////////////////////////////////// suite ////////////////////////////////// - -// C is the Convey context which you can optionally obtain in your action -// by calling Convey like: -// -// Convey(..., func(c C) { -// ... -// }) -// -// See the documentation on Convey for more details. -// -// All methods in this context behave identically to the global functions of the -// same name in this package. -type C interface { - Convey(items ...interface{}) - SkipConvey(items ...interface{}) - FocusConvey(items ...interface{}) - - So(actual interface{}, assert assertion, expected ...interface{}) - SkipSo(stuff ...interface{}) - - Reset(action func()) - - Println(items ...interface{}) (int, error) - Print(items ...interface{}) (int, error) - Printf(format string, items ...interface{}) (int, error) -} - -// Convey is the method intended for use when declaring the scopes of -// a specification. Each scope has a description and a func() which may contain -// other calls to Convey(), Reset() or Should-style assertions. Convey calls can -// be nested as far as you see fit. -// -// IMPORTANT NOTE: The top-level Convey() within a Test method -// must conform to the following signature: -// -// Convey(description string, t *testing.T, action func()) -// -// All other calls should look like this (no need to pass in *testing.T): -// -// Convey(description string, action func()) -// -// Don't worry, goconvey will panic if you get it wrong so you can fix it. 
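The calling conventions described above (only the outermost Convey receives the *testing.T; nested scopes take just a description and an action) are easiest to see in a small test. A minimal sketch, with hypothetical package and test names, could be:

package kata_test

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestIntegerMath(t *testing.T) {
	// Only the top-level Convey is handed the *testing.T.
	Convey("Given two integers", t, func() {
		a, b := 2, 3

		Convey("When they are added", func() {
			sum := a + b

			Convey("The sum should be correct", func() {
				So(sum, ShouldEqual, 5)
			})
		})
	})
}

Per the documentation that follows, an optional FailureMode may also be inserted before the action, e.g. Convey("Given two integers", t, FailureContinues, func() { ... }), to keep a block running past failing assertions.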
-// -// Additionally, you may explicitly obtain access to the Convey context by doing: -// -// Convey(description string, action func(c C)) -// -// You may need to do this if you want to pass the context through to a -// goroutine, or to close over the context in a handler to a library which -// calls your handler in a goroutine (httptest comes to mind). -// -// All Convey()-blocks also accept an optional parameter of FailureMode which sets -// how goconvey should treat failures for So()-assertions in the block and -// nested blocks. See the constants in this file for the available options. -// -// By default it will inherit from its parent block and the top-level blocks -// default to the FailureHalts setting. -// -// This parameter is inserted before the block itself: -// -// Convey(description string, t *testing.T, mode FailureMode, action func()) -// Convey(description string, mode FailureMode, action func()) -// -// See the examples package for, well, examples. -func Convey(items ...interface{}) { - if ctx := getCurrentContext(); ctx == nil { - rootConvey(items...) - } else { - ctx.Convey(items...) - } -} - -// SkipConvey is analogous to Convey except that the scope is not executed -// (which means that child scopes defined within this scope are not run either). -// The reporter will be notified that this step was skipped. -func SkipConvey(items ...interface{}) { - Convey(append(items, skipConvey)...) -} - -// FocusConvey has the inverse effect of SkipConvey. If the top-level -// Convey is changed to `FocusConvey`, only nested scopes that are defined -// with FocusConvey will be run. The rest will be ignored completely. This -// is handy when debugging a large suite that runs a misbehaving function -// repeatedly as you can disable all but one of that function -// without swaths of `SkipConvey` calls, just a targeted chain of calls -// to FocusConvey. -func FocusConvey(items ...interface{}) { - Convey(append(items, focusConvey)...) -} - -// Reset registers a cleanup function to be run after each Convey() -// in the same scope. See the examples package for a simple use case. -func Reset(action func()) { - mustGetCurrentContext().Reset(action) -} - -/////////////////////////////////// Assertions /////////////////////////////////// - -// assertion is an alias for a function with a signature that the convey.So() -// method can handle. Any future or custom assertions should conform to this -// method signature. The return value should be an empty string if the assertion -// passes and a well-formed failure message if not. -type assertion func(actual interface{}, expected ...interface{}) string - -const assertionSuccess = "" - -// So is the means by which assertions are made against the system under test. -// The majority of exported names in the assertions package begin with the word -// 'Should' and describe how the first argument (actual) should compare with any -// of the final (expected) arguments. How many final arguments are accepted -// depends on the particular assertion that is passed in as the assert argument. -// See the examples package for use cases and the assertions package for -// documentation on specific assertion methods. A failing assertion will -// cause t.Fail() to be invoked--you should never call this method (or other -// failure-inducing methods) in your test code. Leave that to GoConvey. -func So(actual interface{}, assert assertion, expected ...interface{}) { - mustGetCurrentContext().So(actual, assert, expected...)
-} - -// SkipSo is analogous to So except that the assertion that would have been passed -// to So is not executed and the reporter is notified that the assertion was skipped. -func SkipSo(stuff ...interface{}) { - mustGetCurrentContext().SkipSo() -} - -// FailureMode is a type which determines how the So() blocks should fail -// if their assertion fails. See constants further down for acceptable values. -type FailureMode string - -const ( - - // FailureContinues is a failure mode which prevents failing - // So()-assertions from halting Convey-block execution, instead - // allowing the test to continue past failing So()-assertions. - FailureContinues FailureMode = "continue" - - // FailureHalts is the default setting for a top-level Convey()-block - // and will cause all failing So()-assertions to halt further execution - // in that test-arm and continue on to the next arm. - FailureHalts FailureMode = "halt" - - // FailureInherits is the default setting for failure-mode, it will - // default to the failure-mode of the parent block. You should never - // need to specify this mode in your tests. - FailureInherits FailureMode = "inherits" -) - -func (f FailureMode) combine(other FailureMode) FailureMode { - if other == FailureInherits { - return f - } - return other -} - -var defaultFailureMode FailureMode = FailureHalts - -// SetDefaultFailureMode allows you to specify the default failure mode -// for all Convey blocks. It is meant to be used in an init function to -// allow the default mode to be changed across all tests for an entire package -// but it can be used anywhere. -func SetDefaultFailureMode(mode FailureMode) { - if mode == FailureContinues || mode == FailureHalts { - defaultFailureMode = mode - } else { - panic("You may only use the constants named 'FailureContinues' and 'FailureHalts' as default failure modes.") - } -} - -//////////////////////////////////// Print functions //////////////////////////////////// - -// Print is analogous to fmt.Print (and it even calls fmt.Print). It ensures that -// output is aligned with the corresponding scopes in the web UI. -func Print(items ...interface{}) (written int, err error) { - return mustGetCurrentContext().Print(items...) -} - -// Println is analogous to fmt.Println (and it even calls fmt.Println). It ensures that -// output is aligned with the corresponding scopes in the web UI. -func Println(items ...interface{}) (written int, err error) { - return mustGetCurrentContext().Println(items...) -} - -// Printf is analogous to fmt.Printf (and it even calls fmt.Printf). It ensures that -// output is aligned with the corresponding scopes in the web UI. -func Printf(format string, items ...interface{}) (written int, err error) { - return mustGetCurrentContext().Printf(format, items...) -} - -/////////////////////////////////////////////////////////////////////////////// - -// SuppressConsoleStatistics prevents automatic printing of console statistics. -// Calling PrintConsoleStatistics explicitly will force printing of statistics. -func SuppressConsoleStatistics() { - reporting.SuppressConsoleStatistics() -} - -// PrintConsoleStatistics may be called at any time to print assertion statistics. -// Generally, the best place to do this would be in a TestMain function, -// after all tests have been run.
Something like this: -// -// func TestMain(m *testing.M) { -// convey.SuppressConsoleStatistics() -// result := m.Run() -// convey.PrintConsoleStatistics() -// os.Exit(result) -// } -// -func PrintConsoleStatistics() { - reporting.PrintConsoleStatistics() -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go b/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go deleted file mode 100755 index 7c5f662..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go +++ /dev/null @@ -1,28 +0,0 @@ -// Package gotest contains internal functionality. Although this package -// contains one or more exported names it is not intended for tools -// consumption. See the examples package for how to use this project. -package gotest - -import ( - "runtime" - "strings" -) - -func ResolveExternalCaller() (file string, line int, name string) { - var caller_id uintptr - callers := runtime.Callers(0, callStack) - - for x := 0; x < callers; x++ { - caller_id, file, line, _ = runtime.Caller(x) - if strings.HasSuffix(file, "_test.go") || strings.HasSuffix(file, "_tests.go") { - name = runtime.FuncForPC(caller_id).Name() - return - } - } - file, line, name = "", -1, "" - return // panic? -} - -const maxStackDepth = 100 // This had better be enough... - -var callStack []uintptr = make([]uintptr, maxStackDepth, maxStackDepth) diff --git a/vendor/github.com/smartystreets/goconvey/convey/init.go b/vendor/github.com/smartystreets/goconvey/convey/init.go deleted file mode 100755 index 0145145..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/init.go +++ /dev/null @@ -1,81 +0,0 @@ -package convey - -import ( - "flag" - "os" - - "github.com/jtolds/gls" - "github.com/smartystreets/assertions" - "github.com/smartystreets/goconvey/convey/reporting" -) - -func init() { - assertions.GoConveyMode(true) - - declareFlags() - - ctxMgr = gls.NewContextManager() -} - -func declareFlags() { - flag.BoolVar(&json, "convey-json", false, "When true, emits results in JSON blocks. Default: 'false'") - flag.BoolVar(&silent, "convey-silent", false, "When true, all output from GoConvey is suppressed.") - flag.BoolVar(&story, "convey-story", false, "When true, emits story output, otherwise emits dot output. When not provided, this flag mirros the value of the '-test.v' flag") - - if noStoryFlagProvided() { - story = verboseEnabled - } - - // FYI: flag.Parse() is called from the testing package. -} - -func noStoryFlagProvided() bool { - return !story && !storyDisabled -} - -func buildReporter() reporting.Reporter { - selectReporter := os.Getenv("GOCONVEY_REPORTER") - - switch { - case testReporter != nil: - return testReporter - case json || selectReporter == "json": - return reporting.BuildJsonReporter() - case silent || selectReporter == "silent": - return reporting.BuildSilentReporter() - case selectReporter == "dot": - // Story is turned on when verbose is set, so we need to check for dot reporter first. - return reporting.BuildDotReporter() - case story || selectReporter == "story": - return reporting.BuildStoryReporter() - default: - return reporting.BuildDotReporter() - } -} - -var ( - ctxMgr *gls.ContextManager - - // only set by internal tests - testReporter reporting.Reporter -) - -var ( - json bool - silent bool - story bool - - verboseEnabled = flagFound("-test.v=true") - storyDisabled = flagFound("-story=false") -) - -// flagFound parses the command line args manually for flags defined in other -// packages. 
Like the '-v' flag from the "testing" package, for instance. -func flagFound(flagValue string) bool { - for _, arg := range os.Args { - if arg == flagValue { - return true - } - } - return false -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go b/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go deleted file mode 100755 index 03f0f06..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go +++ /dev/null @@ -1,15 +0,0 @@ -package convey - -import ( - "github.com/smartystreets/goconvey/convey/reporting" -) - -type nilReporter struct{} - -func (self *nilReporter) BeginStory(story *reporting.StoryReport) {} -func (self *nilReporter) Enter(scope *reporting.ScopeReport) {} -func (self *nilReporter) Report(report *reporting.AssertionResult) {} -func (self *nilReporter) Exit() {} -func (self *nilReporter) EndStory() {} -func (self *nilReporter) Write(p []byte) (int, error) { return len(p), nil } -func newNilReporter() *nilReporter { return &nilReporter{} } diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go deleted file mode 100755 index d7f574b..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go +++ /dev/null @@ -1,16 +0,0 @@ -package reporting - -import ( - "fmt" - "io" -) - -type console struct{} - -func (self *console) Write(p []byte) (n int, err error) { - return fmt.Print(string(p)) -} - -func NewConsole() io.Writer { - return new(console) -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go deleted file mode 100755 index 1677953..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package reporting contains internal functionality related -// to console reporting and output. Although this package has -// exported names is not intended for tools consumption. See the -// examples package for how to use this project. 
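The buildReporter function shown above consults the GOCONVEY_REPORTER environment variable (recognizing "json", "silent", "dot" and "story") as well as the -convey-* flags each time a top-level Convey begins, so a package can pin its reporter from TestMain. A small sketch, assuming JSON output is wanted (package name hypothetical):

package kata_test

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	// Force the JSON reporter for every test in this package.
	os.Setenv("GOCONVEY_REPORTER", "json")
	os.Exit(m.Run())
}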
-package reporting diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go deleted file mode 100755 index c1299ba..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go +++ /dev/null @@ -1,40 +0,0 @@ -package reporting - -import "fmt" - -type dot struct{ out *Printer } - -func (self *dot) BeginStory(story *StoryReport) {} - -func (self *dot) Enter(scope *ScopeReport) {} - -func (self *dot) Report(report *AssertionResult) { - if report.Error != nil { - fmt.Print(redColor) - self.out.Insert(dotError) - } else if report.Failure != "" { - fmt.Print(yellowColor) - self.out.Insert(dotFailure) - } else if report.Skipped { - fmt.Print(yellowColor) - self.out.Insert(dotSkip) - } else { - fmt.Print(greenColor) - self.out.Insert(dotSuccess) - } - fmt.Print(resetColor) -} - -func (self *dot) Exit() {} - -func (self *dot) EndStory() {} - -func (self *dot) Write(content []byte) (written int, err error) { - return len(content), nil // no-op -} - -func NewDotReporter(out *Printer) *dot { - self := new(dot) - self.out = out - return self -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go deleted file mode 100755 index 605021f..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go +++ /dev/null @@ -1,33 +0,0 @@ -package reporting - -type gotestReporter struct{ test T } - -func (self *gotestReporter) BeginStory(story *StoryReport) { - self.test = story.Test -} - -func (self *gotestReporter) Enter(scope *ScopeReport) {} - -func (self *gotestReporter) Report(r *AssertionResult) { - if !passed(r) { - self.test.Fail() - } -} - -func (self *gotestReporter) Exit() {} - -func (self *gotestReporter) EndStory() { - self.test = nil -} - -func (self *gotestReporter) Write(content []byte) (written int, err error) { - return len(content), nil // no-op -} - -func NewGoTestReporter() *gotestReporter { - return new(gotestReporter) -} - -func passed(r *AssertionResult) bool { - return r.Error == nil && r.Failure == "" -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go deleted file mode 100755 index 287961a..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go +++ /dev/null @@ -1,94 +0,0 @@ -package reporting - -import ( - "os" - "runtime" - "strings" -) - -func init() { - if !isColorableTerminal() { - monochrome() - } - - if runtime.GOOS == "windows" { - success, failure, error_ = dotSuccess, dotFailure, dotError - } -} - -func BuildJsonReporter() Reporter { - out := NewPrinter(NewConsole()) - return NewReporters( - NewGoTestReporter(), - NewJsonReporter(out)) -} -func BuildDotReporter() Reporter { - out := NewPrinter(NewConsole()) - return NewReporters( - NewGoTestReporter(), - NewDotReporter(out), - NewProblemReporter(out), - consoleStatistics) -} -func BuildStoryReporter() Reporter { - out := NewPrinter(NewConsole()) - return NewReporters( - NewGoTestReporter(), - NewStoryReporter(out), - NewProblemReporter(out), - consoleStatistics) -} -func BuildSilentReporter() Reporter { - out := NewPrinter(NewConsole()) - return NewReporters( - NewGoTestReporter(), - NewSilentProblemReporter(out)) -} - -var ( - newline = "\n" - success = "✔" - failure = "✘" - error_ = "🔥" - skip = "⚠" - dotSuccess = "." 
- dotFailure = "x" - dotError = "E" - dotSkip = "S" - errorTemplate = "* %s \nLine %d: - %v \n%s\n" - failureTemplate = "* %s \nLine %d:\n%s\n" -) - -var ( - greenColor = "\033[32m" - yellowColor = "\033[33m" - redColor = "\033[31m" - resetColor = "\033[0m" -) - -var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole())) - -func SuppressConsoleStatistics() { consoleStatistics.Suppress() } -func PrintConsoleStatistics() { consoleStatistics.PrintSummary() } - -// QuiteMode disables all console output symbols. This is only meant to be used -// for tests that are internal to goconvey where the output is distracting or -// otherwise not needed in the test output. -func QuietMode() { - success, failure, error_, skip, dotSuccess, dotFailure, dotError, dotSkip = "", "", "", "", "", "", "", "" -} - -func monochrome() { - greenColor, yellowColor, redColor, resetColor = "", "", "", "" -} - -func isColorableTerminal() bool { - return strings.Contains(os.Getenv("TERM"), "color") -} - -// This interface allows us to pass the *testing.T struct -// throughout the internals of this tool without ever -// having to import the "testing" package. -type T interface { - Fail() -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go deleted file mode 100755 index aeb9220..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go +++ /dev/null @@ -1,88 +0,0 @@ -// TODO: under unit test - -package reporting - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type JsonReporter struct { - out *Printer - currentKey []string - current *ScopeResult - index map[string]*ScopeResult - scopes []*ScopeResult -} - -func (self *JsonReporter) depth() int { return len(self.currentKey) } - -func (self *JsonReporter) BeginStory(story *StoryReport) {} - -func (self *JsonReporter) Enter(scope *ScopeReport) { - self.currentKey = append(self.currentKey, scope.Title) - ID := strings.Join(self.currentKey, "|") - if _, found := self.index[ID]; !found { - next := newScopeResult(scope.Title, self.depth(), scope.File, scope.Line) - self.scopes = append(self.scopes, next) - self.index[ID] = next - } - self.current = self.index[ID] -} - -func (self *JsonReporter) Report(report *AssertionResult) { - self.current.Assertions = append(self.current.Assertions, report) -} - -func (self *JsonReporter) Exit() { - self.currentKey = self.currentKey[:len(self.currentKey)-1] -} - -func (self *JsonReporter) EndStory() { - self.report() - self.reset() -} -func (self *JsonReporter) report() { - scopes := []string{} - for _, scope := range self.scopes { - serialized, err := json.Marshal(scope) - if err != nil { - self.out.Println(jsonMarshalFailure) - panic(err) - } - var buffer bytes.Buffer - json.Indent(&buffer, serialized, "", " ") - scopes = append(scopes, buffer.String()) - } - self.out.Print(fmt.Sprintf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson)) -} -func (self *JsonReporter) reset() { - self.scopes = []*ScopeResult{} - self.index = map[string]*ScopeResult{} - self.currentKey = nil -} - -func (self *JsonReporter) Write(content []byte) (written int, err error) { - self.current.Output += string(content) - return len(content), nil -} - -func NewJsonReporter(out *Printer) *JsonReporter { - self := new(JsonReporter) - self.out = out - self.reset() - return self -} - -const OpenJson = ">->->OPEN-JSON->->->" // "⌦" -const CloseJson = "<-<-<-CLOSE-JSON<-<-<" // "⌫" -const jsonMarshalFailure = ` - 
-GOCONVEY_JSON_MARSHALL_FAILURE: There was an error when attempting to convert test results to JSON. -Please file a bug report and reference the code that caused this failure if possible. - -Here's the panic: - -` diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go deleted file mode 100755 index 5cf5a90..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go +++ /dev/null @@ -1,57 +0,0 @@ -package reporting - -import ( - "fmt" - "io" - "strings" -) - -type Printer struct { - out io.Writer - prefix string -} - -func (self *Printer) Println(message string, values ...interface{}) { - formatted := self.format(message, values...) + newline - self.out.Write([]byte(formatted)) -} - -func (self *Printer) Print(message string, values ...interface{}) { - formatted := self.format(message, values...) - self.out.Write([]byte(formatted)) -} - -func (self *Printer) Insert(text string) { - self.out.Write([]byte(text)) -} - -func (self *Printer) format(message string, values ...interface{}) string { - var formatted string - if len(values) == 0 { - formatted = self.prefix + message - } else { - formatted = self.prefix + fmt.Sprintf(message, values...) - } - indented := strings.Replace(formatted, newline, newline+self.prefix, -1) - return strings.TrimRight(indented, space) -} - -func (self *Printer) Indent() { - self.prefix += pad -} - -func (self *Printer) Dedent() { - if len(self.prefix) >= padLength { - self.prefix = self.prefix[:len(self.prefix)-padLength] - } -} - -func NewPrinter(out io.Writer) *Printer { - self := new(Printer) - self.out = out - return self -} - -const space = " " -const pad = space + space -const padLength = len(pad) diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go deleted file mode 100755 index d842dc6..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go +++ /dev/null @@ -1,80 +0,0 @@ -package reporting - -import "fmt" - -type problem struct { - silent bool - out *Printer - errors []*AssertionResult - failures []*AssertionResult -} - -func (self *problem) BeginStory(story *StoryReport) {} - -func (self *problem) Enter(scope *ScopeReport) {} - -func (self *problem) Report(report *AssertionResult) { - if report.Error != nil { - self.errors = append(self.errors, report) - } else if report.Failure != "" { - self.failures = append(self.failures, report) - } -} - -func (self *problem) Exit() {} - -func (self *problem) EndStory() { - self.show(self.showErrors, redColor) - self.show(self.showFailures, yellowColor) - self.prepareForNextStory() -} -func (self *problem) show(display func(), color string) { - if !self.silent { - fmt.Print(color) - } - display() - if !self.silent { - fmt.Print(resetColor) - } - self.out.Dedent() -} -func (self *problem) showErrors() { - for i, e := range self.errors { - if i == 0 { - self.out.Println("\nErrors:\n") - self.out.Indent() - } - self.out.Println(errorTemplate, e.File, e.Line, e.Error, e.StackTrace) - } -} -func (self *problem) showFailures() { - for i, f := range self.failures { - if i == 0 { - self.out.Println("\nFailures:\n") - self.out.Indent() - } - self.out.Println(failureTemplate, f.File, f.Line, f.Failure) - } -} - -func (self *problem) Write(content []byte) (written int, err error) { - return len(content), nil // no-op -} - -func NewProblemReporter(out *Printer) *problem { - self 
:= new(problem) - self.out = out - self.prepareForNextStory() - return self -} - -func NewSilentProblemReporter(out *Printer) *problem { - self := NewProblemReporter(out) - self.silent = true - return self -} - -func (self *problem) prepareForNextStory() { - self.errors = []*AssertionResult{} - self.failures = []*AssertionResult{} -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go deleted file mode 100755 index f7d6e85..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go +++ /dev/null @@ -1,39 +0,0 @@ -package reporting - -import "io" - -type Reporter interface { - BeginStory(story *StoryReport) - Enter(scope *ScopeReport) - Report(r *AssertionResult) - Exit() - EndStory() - io.Writer -} - -type reporters struct{ collection []Reporter } - -func (self *reporters) BeginStory(s *StoryReport) { self.foreach(func(r Reporter) { r.BeginStory(s) }) } -func (self *reporters) Enter(s *ScopeReport) { self.foreach(func(r Reporter) { r.Enter(s) }) } -func (self *reporters) Report(a *AssertionResult) { self.foreach(func(r Reporter) { r.Report(a) }) } -func (self *reporters) Exit() { self.foreach(func(r Reporter) { r.Exit() }) } -func (self *reporters) EndStory() { self.foreach(func(r Reporter) { r.EndStory() }) } - -func (self *reporters) Write(contents []byte) (written int, err error) { - self.foreach(func(r Reporter) { - written, err = r.Write(contents) - }) - return written, err -} - -func (self *reporters) foreach(action func(Reporter)) { - for _, r := range self.collection { - action(r) - } -} - -func NewReporters(collection ...Reporter) *reporters { - self := new(reporters) - self.collection = collection - return self -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey deleted file mode 100755 index b624308..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey +++ /dev/null @@ -1,2 +0,0 @@ -#ignore --timeout=1s diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go deleted file mode 100755 index ee18cc6..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go +++ /dev/null @@ -1,179 +0,0 @@ -package reporting - -import ( - "encoding/json" - "fmt" - "runtime" - "strings" - - "github.com/smartystreets/goconvey/convey/gotest" -) - -////////////////// ScopeReport //////////////////// - -type ScopeReport struct { - Title string - File string - Line int -} - -func NewScopeReport(title string) *ScopeReport { - file, line, _ := gotest.ResolveExternalCaller() - self := new(ScopeReport) - self.Title = title - self.File = file - self.Line = line - return self -} - -////////////////// ScopeResult //////////////////// - -type ScopeResult struct { - Title string - File string - Line int - Depth int - Assertions []*AssertionResult - Output string -} - -func newScopeResult(title string, depth int, file string, line int) *ScopeResult { - self := new(ScopeResult) - self.Title = title - self.Depth = depth - self.File = file - self.Line = line - self.Assertions = []*AssertionResult{} - return self -} - -/////////////////// StoryReport ///////////////////// - -type StoryReport struct { - Test T - Name string - File string - Line int -} - -func NewStoryReport(test T) *StoryReport { - file, line, name := 
gotest.ResolveExternalCaller() - name = removePackagePath(name) - self := new(StoryReport) - self.Test = test - self.Name = name - self.File = file - self.Line = line - return self -} - -// name comes in looking like "github.com/smartystreets/goconvey/examples.TestName". -// We only want the stuff after the last '.', which is the name of the test function. -func removePackagePath(name string) string { - parts := strings.Split(name, ".") - return parts[len(parts)-1] -} - -/////////////////// FailureView //////////////////////// - -// This struct is also declared in github.com/smartystreets/assertions. -// The json struct tags should be equal in both declarations. -type FailureView struct { - Message string `json:"Message"` - Expected string `json:"Expected"` - Actual string `json:"Actual"` -} - -////////////////////AssertionResult ////////////////////// - -type AssertionResult struct { - File string - Line int - Expected string - Actual string - Failure string - Error interface{} - StackTrace string - Skipped bool -} - -func NewFailureReport(failure string) *AssertionResult { - report := new(AssertionResult) - report.File, report.Line = caller() - report.StackTrace = stackTrace() - parseFailure(failure, report) - return report -} -func parseFailure(failure string, report *AssertionResult) { - view := new(FailureView) - err := json.Unmarshal([]byte(failure), view) - if err == nil { - report.Failure = view.Message - report.Expected = view.Expected - report.Actual = view.Actual - } else { - report.Failure = failure - } -} -func NewErrorReport(err interface{}) *AssertionResult { - report := new(AssertionResult) - report.File, report.Line = caller() - report.StackTrace = fullStackTrace() - report.Error = fmt.Sprintf("%v", err) - return report -} -func NewSuccessReport() *AssertionResult { - return new(AssertionResult) -} -func NewSkipReport() *AssertionResult { - report := new(AssertionResult) - report.File, report.Line = caller() - report.StackTrace = fullStackTrace() - report.Skipped = true - return report -} - -func caller() (file string, line int) { - file, line, _ = gotest.ResolveExternalCaller() - return -} - -func stackTrace() string { - buffer := make([]byte, 1024*64) - n := runtime.Stack(buffer, false) - return removeInternalEntries(string(buffer[:n])) -} -func fullStackTrace() string { - buffer := make([]byte, 1024*64) - n := runtime.Stack(buffer, true) - return removeInternalEntries(string(buffer[:n])) -} -func removeInternalEntries(stack string) string { - lines := strings.Split(stack, newline) - filtered := []string{} - for _, line := range lines { - if !isExternal(line) { - filtered = append(filtered, line) - } - } - return strings.Join(filtered, newline) -} -func isExternal(line string) bool { - for _, p := range internalPackages { - if strings.Contains(line, p) { - return true - } - } - return false -} - -// NOTE: any new packages that host goconvey packages will need to be added here! -// An alternative is to scan the goconvey directory and then exclude stuff like -// the examples package but that's nasty too. 
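The parseFailure function above accepts either a plain failure message or a JSON document with the FailureView shape, so a custom assertion can surface structured Expected/Actual values simply by returning such JSON. A hypothetical sketch (ShouldBeEven and the package name are illustrative, not part of this patch):

package kata_test

import (
	"encoding/json"
	"fmt"
)

// ShouldBeEven is a hypothetical assertion matching the signature convey.So expects.
func ShouldBeEven(actual interface{}, _ ...interface{}) string {
	if n, ok := actual.(int); ok && n%2 == 0 {
		return "" // an empty string reports success
	}
	view := struct {
		Message  string `json:"Message"`
		Expected string `json:"Expected"`
		Actual   string `json:"Actual"`
	}{
		Message:  fmt.Sprintf("expected %v to be an even integer", actual),
		Expected: "an even integer",
		Actual:   fmt.Sprintf("%v", actual),
	}
	b, err := json.Marshal(view)
	if err != nil {
		return "expected an even integer" // fall back to a plain-text failure
	}
	return string(b)
}

So(41, ShouldBeEven) would then let the reporters shown here render the expected and actual values separately rather than as one opaque message.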
-var internalPackages = []string{ - "goconvey/assertions", - "goconvey/convey", - "goconvey/execution", - "goconvey/gotest", - "goconvey/reporting", -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go deleted file mode 100755 index cbe03da..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go +++ /dev/null @@ -1,108 +0,0 @@ -package reporting - -import ( - "fmt" - "sync" -) - -func (self *statistics) BeginStory(story *StoryReport) {} - -func (self *statistics) Enter(scope *ScopeReport) {} - -func (self *statistics) Report(report *AssertionResult) { - self.Lock() - defer self.Unlock() - - if !self.failing && report.Failure != "" { - self.failing = true - } - if !self.erroring && report.Error != nil { - self.erroring = true - } - if report.Skipped { - self.skipped += 1 - } else { - self.total++ - } -} - -func (self *statistics) Exit() {} - -func (self *statistics) EndStory() { - self.Lock() - defer self.Unlock() - - if !self.suppressed { - self.printSummaryLocked() - } -} - -func (self *statistics) Suppress() { - self.Lock() - defer self.Unlock() - self.suppressed = true -} - -func (self *statistics) PrintSummary() { - self.Lock() - defer self.Unlock() - self.printSummaryLocked() -} - -func (self *statistics) printSummaryLocked() { - self.reportAssertionsLocked() - self.reportSkippedSectionsLocked() - self.completeReportLocked() -} -func (self *statistics) reportAssertionsLocked() { - self.decideColorLocked() - self.out.Print("\n%d total %s", self.total, plural("assertion", self.total)) -} -func (self *statistics) decideColorLocked() { - if self.failing && !self.erroring { - fmt.Print(yellowColor) - } else if self.erroring { - fmt.Print(redColor) - } else { - fmt.Print(greenColor) - } -} -func (self *statistics) reportSkippedSectionsLocked() { - if self.skipped > 0 { - fmt.Print(yellowColor) - self.out.Print(" (one or more sections skipped)") - } -} -func (self *statistics) completeReportLocked() { - fmt.Print(resetColor) - self.out.Print("\n") - self.out.Print("\n") -} - -func (self *statistics) Write(content []byte) (written int, err error) { - return len(content), nil // no-op -} - -func NewStatisticsReporter(out *Printer) *statistics { - self := statistics{} - self.out = out - return &self -} - -type statistics struct { - sync.Mutex - - out *Printer - total int - failing bool - erroring bool - skipped int - suppressed bool -} - -func plural(word string, count int) string { - if count == 1 { - return word - } - return word + "s" -} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go deleted file mode 100755 index 10e999e..0000000 --- a/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go +++ /dev/null @@ -1,73 +0,0 @@ -// TODO: in order for this reporter to be completely honest -// we need to retrofit to be more like the json reporter such that: -// 1. it maintains ScopeResult collections, which count assertions -// 2. it reports only after EndStory(), so that all tick marks -// are placed near the appropriate title. -// 3. 
Under unit test - -package reporting - -import ( - "fmt" - "strings" -) - -type story struct { - out *Printer - titlesById map[string]string - currentKey []string -} - -func (self *story) BeginStory(story *StoryReport) {} - -func (self *story) Enter(scope *ScopeReport) { - self.out.Indent() - - self.currentKey = append(self.currentKey, scope.Title) - ID := strings.Join(self.currentKey, "|") - - if _, found := self.titlesById[ID]; !found { - self.out.Println("") - self.out.Print(scope.Title) - self.out.Insert(" ") - self.titlesById[ID] = scope.Title - } -} - -func (self *story) Report(report *AssertionResult) { - if report.Error != nil { - fmt.Print(redColor) - self.out.Insert(error_) - } else if report.Failure != "" { - fmt.Print(yellowColor) - self.out.Insert(failure) - } else if report.Skipped { - fmt.Print(yellowColor) - self.out.Insert(skip) - } else { - fmt.Print(greenColor) - self.out.Insert(success) - } - fmt.Print(resetColor) -} - -func (self *story) Exit() { - self.out.Dedent() - self.currentKey = self.currentKey[:len(self.currentKey)-1] -} - -func (self *story) EndStory() { - self.titlesById = make(map[string]string) - self.out.Println("\n") -} - -func (self *story) Write(content []byte) (written int, err error) { - return len(content), nil // no-op -} - -func NewStoryReporter(out *Printer) *story { - self := new(story) - self.out = out - self.titlesById = make(map[string]string) - return self -} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go deleted file mode 100644 index ece9113..0000000 --- a/vendor/golang.org/x/crypto/acme/acme.go +++ /dev/null @@ -1,921 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package acme provides an implementation of the -// Automatic Certificate Management Environment (ACME) spec. -// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. -// -// Most common scenarios will want to use autocert subdirectory instead, -// which provides automatic access to certificates from Let's Encrypt -// and any other ACME-based CA. -// -// This package is a work in progress and makes no API stability promises. -package acme - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "sync" - "time" -) - -const ( - // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. - LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" - - // ALPNProto is the ALPN protocol name used by a CA server when validating - // tls-alpn-01 challenges. - // - // Package users must ensure their servers can negotiate the ACME ALPN - // in order for tls-alpn-01 challenge verifications to succeed. - ALPNProto = "acme-tls/1" -) - -// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge. -var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1} - -const ( - maxChainLen = 5 // max depth and breadth of a certificate chain - maxCertSize = 1 << 20 // max size of a certificate, in bytes - - // Max number of collected nonces kept in memory. - // Expect usual peak of 1 or 2. - maxNonces = 100 -) - -// Client is an ACME client. -// The only required field is Key. 
An example of creating a client with a new key -// is as follows: -// -// key, err := rsa.GenerateKey(rand.Reader, 2048) -// if err != nil { -// log.Fatal(err) -// } -// client := &Client{Key: key} -// -type Client struct { - // Key is the account key used to register with a CA and sign requests. - // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. - Key crypto.Signer - - // HTTPClient optionally specifies an HTTP client to use - // instead of http.DefaultClient. - HTTPClient *http.Client - - // DirectoryURL points to the CA directory endpoint. - // If empty, LetsEncryptURL is used. - // Mutating this value after a successful call of Client's Discover method - // will have no effect. - DirectoryURL string - - // RetryBackoff computes the duration after which the nth retry of a failed request - // should occur. The value of n for the first call on failure is 1. - // The values of r and resp are the request and response of the last failed attempt. - // If the returned value is negative or zero, no more retries are done and an error - // is returned to the caller of the original method. - // - // Requests which result in a 4xx client error are not retried, - // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. - // - // If RetryBackoff is nil, a truncated exponential backoff algorithm - // with the ceiling of 10 seconds is used, where each subsequent retry n - // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), - // preferring the former if "Retry-After" header is found in the resp. - // The jitter is a random value up to 1 second. - RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration - - dirMu sync.Mutex // guards writes to dir - dir *Directory // cached result of Client's Discover method - - noncesMu sync.Mutex - nonces map[string]struct{} // nonces collected from previous responses -} - -// Discover performs ACME server discovery using c.DirectoryURL. -// -// It caches successful result. So, subsequent calls will not result in -// a network round-trip. This also means mutating c.DirectoryURL after successful call -// of this method will have no effect. -func (c *Client) Discover(ctx context.Context) (Directory, error) { - c.dirMu.Lock() - defer c.dirMu.Unlock() - if c.dir != nil { - return *c.dir, nil - } - - dirURL := c.DirectoryURL - if dirURL == "" { - dirURL = LetsEncryptURL - } - res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK)) - if err != nil { - return Directory{}, err - } - defer res.Body.Close() - c.addNonce(res.Header) - - var v struct { - Reg string `json:"new-reg"` - Authz string `json:"new-authz"` - Cert string `json:"new-cert"` - Revoke string `json:"revoke-cert"` - Meta struct { - Terms string `json:"terms-of-service"` - Website string `json:"website"` - CAA []string `json:"caa-identities"` - } - } - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return Directory{}, err - } - c.dir = &Directory{ - RegURL: v.Reg, - AuthzURL: v.Authz, - CertURL: v.Cert, - RevokeURL: v.Revoke, - Terms: v.Meta.Terms, - Website: v.Meta.Website, - CAA: v.Meta.CAA, - } - return *c.dir, nil -} - -// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. -// The exp argument indicates the desired certificate validity duration. CA may issue a certificate -// with a different duration. -// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. 
-// -// In the case where CA server does not provide the issued certificate in the response, -// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. -// In such a scenario, the caller can cancel the polling with ctx. -// -// CreateCert returns an error if the CA's response or chain was unreasonably large. -// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. -func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { - if _, err := c.Discover(ctx); err != nil { - return nil, "", err - } - - req := struct { - Resource string `json:"resource"` - CSR string `json:"csr"` - NotBefore string `json:"notBefore,omitempty"` - NotAfter string `json:"notAfter,omitempty"` - }{ - Resource: "new-cert", - CSR: base64.RawURLEncoding.EncodeToString(csr), - } - now := timeNow() - req.NotBefore = now.Format(time.RFC3339) - if exp > 0 { - req.NotAfter = now.Add(exp).Format(time.RFC3339) - } - - res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated)) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - - curl := res.Header.Get("Location") // cert permanent URL - if res.ContentLength == 0 { - // no cert in the body; poll until we get it - cert, err := c.FetchCert(ctx, curl, bundle) - return cert, curl, err - } - // slurp issued cert and CA chain, if requested - cert, err := c.responseCert(ctx, res, bundle) - return cert, curl, err -} - -// FetchCert retrieves already issued certificate from the given url, in DER format. -// It retries the request until the certificate is successfully retrieved, -// context is cancelled by the caller or an error response is received. -// -// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. -// -// FetchCert returns an error if the CA's response or chain was unreasonably large. -// Callers are encouraged to parse the returned value to ensure the certificate is valid -// and has expected features. -func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK)) - if err != nil { - return nil, err - } - return c.responseCert(ctx, res, bundle) -} - -// RevokeCert revokes a previously issued certificate cert, provided in DER format. -// -// The key argument, used to sign the request, must be authorized -// to revoke the certificate. It's up to the CA to decide which keys are authorized. -// For instance, the key pair of the certificate may be authorized. -// If the key is nil, c.Key is used instead. -func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { - if _, err := c.Discover(ctx); err != nil { - return err - } - - body := &struct { - Resource string `json:"resource"` - Cert string `json:"certificate"` - Reason int `json:"reason"` - }{ - Resource: "revoke-cert", - Cert: base64.RawURLEncoding.EncodeToString(cert), - Reason: int(reason), - } - if key == nil { - key = c.Key - } - res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK)) - if err != nil { - return err - } - defer res.Body.Close() - return nil -} - -// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service -// during account registration. See Register method of Client for more details. 
-func AcceptTOS(tosURL string) bool { return true } - -// Register creates a new account registration by following the "new-reg" flow. -// It returns the registered account. The account is not modified. -// -// The registration may require the caller to agree to the CA's Terms of Service (TOS). -// If so, and the account has not indicated the acceptance of the terms (see Account for details), -// Register calls prompt with a TOS URL provided by the CA. Prompt should report -// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. -func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { - if _, err := c.Discover(ctx); err != nil { - return nil, err - } - - var err error - if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { - return nil, err - } - var accept bool - if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { - accept = prompt(a.CurrentTerms) - } - if accept { - a.AgreedTerms = a.CurrentTerms - a, err = c.UpdateReg(ctx, a) - } - return a, err -} - -// GetReg retrieves an existing registration. -// The url argument is an Account URI. -func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { - a, err := c.doReg(ctx, url, "reg", nil) - if err != nil { - return nil, err - } - a.URI = url - return a, nil -} - -// UpdateReg updates an existing registration. -// It returns an updated account copy. The provided account is not modified. -func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { - uri := a.URI - a, err := c.doReg(ctx, uri, "reg", a) - if err != nil { - return nil, err - } - a.URI = uri - return a, nil -} - -// Authorize performs the initial step in an authorization flow. -// The caller will then need to choose from and perform a set of returned -// challenges using c.Accept in order to successfully complete authorization. -// -// If an authorization has been previously granted, the CA may return -// a valid authorization (Authorization.Status is StatusValid). If so, the caller -// need not fulfill any challenge and can proceed to requesting a certificate. -func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { - if _, err := c.Discover(ctx); err != nil { - return nil, err - } - - type authzID struct { - Type string `json:"type"` - Value string `json:"value"` - } - req := struct { - Resource string `json:"resource"` - Identifier authzID `json:"identifier"` - }{ - Resource: "new-authz", - Identifier: authzID{Type: "dns", Value: domain}, - } - res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v wireAuthz - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - if v.Status != StatusPending && v.Status != StatusValid { - return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) - } - return v.authorization(res.Header.Get("Location")), nil -} - -// GetAuthorization retrieves an authorization identified by the given URL. -// -// If a caller needs to poll an authorization until its status is final, -// see the WaitAuthorization method. 
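A short sketch of the registration flow described above, passing AcceptTOS as the prompt so the CA's terms are accepted unconditionally; the contact e-mail and helper name are placeholders:

package acmeexample

import (
	"context"

	"golang.org/x/crypto/acme"
)

// registerAccount creates a new registration for the client's account key,
// agreeing to the CA's current Terms of Service.
func registerAccount(ctx context.Context, client *acme.Client, email string) (*acme.Account, error) {
	acct := &acme.Account{Contact: []string{"mailto:" + email}}
	return client.Register(ctx, acct, acme.AcceptTOS)
}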
-func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - defer res.Body.Close() - var v wireAuthz - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.authorization(url), nil -} - -// RevokeAuthorization relinquishes an existing authorization identified -// by the given URL. -// The url argument is an Authorization.URI value. -// -// If successful, the caller will be required to obtain a new authorization -// using the Authorize method before being able to request a new certificate -// for the domain associated with the authorization. -// -// It does not revoke existing certificates. -func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { - req := struct { - Resource string `json:"resource"` - Status string `json:"status"` - Delete bool `json:"delete"` - }{ - Resource: "authz", - Status: "deactivated", - Delete: true, - } - res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK)) - if err != nil { - return err - } - defer res.Body.Close() - return nil -} - -// WaitAuthorization polls an authorization at the given URL -// until it is in one of the final states, StatusValid or StatusInvalid, -// the ACME CA responded with a 4xx error code, or the context is done. -// -// It returns a non-nil Authorization only if its Status is StatusValid. -// In all other cases WaitAuthorization returns an error. -// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. -func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { - for { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - - var raw wireAuthz - err = json.NewDecoder(res.Body).Decode(&raw) - res.Body.Close() - switch { - case err != nil: - // Skip and retry. - case raw.Status == StatusValid: - return raw.authorization(url), nil - case raw.Status == StatusInvalid: - return nil, raw.error(url) - } - - // Exponential backoff is implemented in c.get above. - // This is just to prevent continuously hitting the CA - // while waiting for a final authorization status. - d := retryAfter(res.Header.Get("Retry-After")) - if d == 0 { - // Given that the fastest challenges TLS-SNI and HTTP-01 - // require a CA to make at least 1 network round trip - // and most likely persist a challenge state, - // this default delay seems reasonable. - d = time.Second - } - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return nil, ctx.Err() - case <-t.C: - // Retry. - } - } -} - -// GetChallenge retrieves the current status of an challenge. -// -// A client typically polls a challenge status using this method. -func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - defer res.Body.Close() - v := wireChallenge{URI: url} - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.challenge(), nil -} - -// Accept informs the server that the client accepts one of its challenges -// previously obtained with c.Authorize. -// -// The server will then perform the validation asynchronously. 
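Tying Authorize, Accept and WaitAuthorization together, one authorization round could be sketched as follows; the challenge type is chosen by the caller, and provisioning the challenge response beforehand (the http-01 body or the dns-01 TXT record) is assumed to have happened out of band:

package acmeexample

import (
	"context"
	"errors"

	"golang.org/x/crypto/acme"
)

// authorize runs one authorization round for domain with the given challenge
// type. It assumes the caller has already provisioned the challenge response.
func authorize(ctx context.Context, client *acme.Client, domain, challengeType string) error {
	authz, err := client.Authorize(ctx, domain)
	if err != nil {
		return err
	}
	if authz.Status == acme.StatusValid {
		return nil // a previous authorization is still valid
	}
	var chal *acme.Challenge
	for _, c := range authz.Challenges {
		if c.Type == challengeType {
			chal = c
			break
		}
	}
	if chal == nil {
		return errors.New("CA offered no " + challengeType + " challenge")
	}
	if _, err := client.Accept(ctx, chal); err != nil {
		return err
	}
	// Block until the CA reaches a final status; the error is
	// *acme.AuthorizationError when the authorization ends up invalid.
	_, err = client.WaitAuthorization(ctx, authz.URI)
	return err
}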
-func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { - auth, err := keyAuth(c.Key.Public(), chal.Token) - if err != nil { - return nil, err - } - - req := struct { - Resource string `json:"resource"` - Type string `json:"type"` - Auth string `json:"keyAuthorization"` - }{ - Resource: "challenge", - Type: chal.Type, - Auth: auth, - } - res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus( - http.StatusOK, // according to the spec - http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) - )) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v wireChallenge - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.challenge(), nil -} - -// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. -// A TXT record containing the returned value must be provisioned under -// "_acme-challenge" name of the domain being validated. -// -// The token argument is a Challenge.Token value. -func (c *Client) DNS01ChallengeRecord(token string) (string, error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return "", err - } - b := sha256.Sum256([]byte(ka)) - return base64.RawURLEncoding.EncodeToString(b[:]), nil -} - -// HTTP01ChallengeResponse returns the response for an http-01 challenge. -// Servers should respond with the value to HTTP requests at the URL path -// provided by HTTP01ChallengePath to validate the challenge and prove control -// over a domain name. -// -// The token argument is a Challenge.Token value. -func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { - return keyAuth(c.Key.Public(), token) -} - -// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge -// should be provided by the servers. -// The response value can be obtained with HTTP01ChallengeResponse. -// -// The token argument is a Challenge.Token value. -func (c *Client) HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. -// -// The implementation is incomplete in that the returned value is a single certificate, -// computed only for Z0 of the key authorization. ACME CAs are expected to update -// their implementations to use the newer version, TLS-SNI-02. -// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name of the TLS ClientHello matches exactly the returned name value. 
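For http-01 specifically, the key authorization returned by HTTP01ChallengeResponse has to be served on port 80 at the path given by HTTP01ChallengePath; a small sketch, where the token map and fallback handler are assumptions of the example:

package acmeexample

import (
	"net/http"
	"strings"
)

// challengeHandler serves http-01 key authorizations for the tokens in
// responses (token -> key authorization) and delegates everything else
// to fallback.
func challengeHandler(responses map[string]string, fallback http.Handler) http.Handler {
	const prefix = "/.well-known/acme-challenge/"
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.HasPrefix(r.URL.Path, prefix) {
			fallback.ServeHTTP(w, r)
			return
		}
		if ka, ok := responses[strings.TrimPrefix(r.URL.Path, prefix)]; ok {
			w.Write([]byte(ka))
			return
		}
		http.NotFound(w, r)
	})
}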
-func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b := sha256.Sum256([]byte(ka)) - h := hex.EncodeToString(b[:]) - name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) - cert, err = tlsChallengeCert([]string{name}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, name, nil -} - -// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. For more details on TLS-SNI-02 see -// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name in the TLS ClientHello matches exactly the returned name value. -func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - b := sha256.Sum256([]byte(token)) - h := hex.EncodeToString(b[:]) - sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) - - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b = sha256.Sum256([]byte(ka)) - h = hex.EncodeToString(b[:]) - sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) - - cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, sanA, nil -} - -// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. For more details on TLS-ALPN-01 see -// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol -// has been specified. 
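A sketch of how the tls-alpn-01 certificate might be presented during validation; wiring the returned config into an actual listener is left to the caller, and the helper name is an assumption of the example:

package acmeexample

import (
	"crypto/tls"
	"errors"

	"golang.org/x/crypto/acme"
)

// alpnChallengeConfig returns a TLS config that offers the acme-tls/1 protocol
// and presents the challenge certificate whenever the CA validates domain.
func alpnChallengeConfig(client *acme.Client, domain, token string) (*tls.Config, error) {
	cert, err := client.TLSALPN01ChallengeCert(token, domain)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		NextProtos: []string{acme.ALPNProto},
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if hello.ServerName != domain {
				return nil, errors.New("unexpected server name during tls-alpn-01 validation")
			}
			return &cert, nil
		},
	}, nil
}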
-func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, err - } - shasum := sha256.Sum256([]byte(ka)) - extValue, err := asn1.Marshal(shasum[:]) - if err != nil { - return tls.Certificate{}, err - } - acmeExtension := pkix.Extension{ - Id: idPeACMEIdentifierV1, - Critical: true, - Value: extValue, - } - - tmpl := defaultTLSChallengeCertTemplate() - - var newOpt []CertOption - for _, o := range opt { - switch o := o.(type) { - case *certOptTemplate: - t := *(*x509.Certificate)(o) // shallow copy is ok - tmpl = &t - default: - newOpt = append(newOpt, o) - } - } - tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) - newOpt = append(newOpt, WithTemplate(tmpl)) - return tlsChallengeCert([]string{domain}, newOpt) -} - -// doReg sends all types of registration requests. -// The type of request is identified by typ argument, which is a "resource" -// in the ACME spec terms. -// -// A non-nil acct argument indicates whether the intention is to mutate data -// of the Account. Only Contact and Agreement of its fields are used -// in such cases. -func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { - req := struct { - Resource string `json:"resource"` - Contact []string `json:"contact,omitempty"` - Agreement string `json:"agreement,omitempty"` - }{ - Resource: typ, - } - if acct != nil { - req.Contact = acct.Contact - req.Agreement = acct.AgreedTerms - } - res, err := c.post(ctx, c.Key, url, req, wantStatus( - http.StatusOK, // updates and deletes - http.StatusCreated, // new account creation - http.StatusAccepted, // Let's Encrypt divergent implementation - )) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v struct { - Contact []string - Agreement string - Authorizations string - Certificates string - } - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - var tos string - if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { - tos = v[0] - } - var authz string - if v := linkHeader(res.Header, "next"); len(v) > 0 { - authz = v[0] - } - return &Account{ - URI: res.Header.Get("Location"), - Contact: v.Contact, - AgreedTerms: v.Agreement, - CurrentTerms: tos, - Authz: authz, - Authorizations: v.Authorizations, - Certificates: v.Certificates, - }, nil -} - -// popNonce returns a nonce value previously stored with c.addNonce -// or fetches a fresh one from the given URL. -func (c *Client) popNonce(ctx context.Context, url string) (string, error) { - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - if len(c.nonces) == 0 { - return c.fetchNonce(ctx, url) - } - var nonce string - for nonce = range c.nonces { - delete(c.nonces, nonce) - break - } - return nonce, nil -} - -// clearNonces clears any stored nonces -func (c *Client) clearNonces() { - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - c.nonces = make(map[string]struct{}) -} - -// addNonce stores a nonce value found in h (if any) for future use. 
-func (c *Client) addNonce(h http.Header) { - v := nonceFromHeader(h) - if v == "" { - return - } - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - if len(c.nonces) >= maxNonces { - return - } - if c.nonces == nil { - c.nonces = make(map[string]struct{}) - } - c.nonces[v] = struct{}{} -} - -func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { - r, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return "", err - } - resp, err := c.doNoRetry(ctx, r) - if err != nil { - return "", err - } - defer resp.Body.Close() - nonce := nonceFromHeader(resp.Header) - if nonce == "" { - if resp.StatusCode > 299 { - return "", responseError(resp) - } - return "", errors.New("acme: nonce not found") - } - return nonce, nil -} - -func nonceFromHeader(h http.Header) string { - return h.Get("Replay-Nonce") -} - -func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { - b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) - if err != nil { - return nil, fmt.Errorf("acme: response stream: %v", err) - } - if len(b) > maxCertSize { - return nil, errors.New("acme: certificate is too big") - } - cert := [][]byte{b} - if !bundle { - return cert, nil - } - - // Append CA chain cert(s). - // At least one is required according to the spec: - // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 - up := linkHeader(res.Header, "up") - if len(up) == 0 { - return nil, errors.New("acme: rel=up link not found") - } - if len(up) > maxChainLen { - return nil, errors.New("acme: rel=up link is too large") - } - for _, url := range up { - cc, err := c.chainCert(ctx, url, 0) - if err != nil { - return nil, err - } - cert = append(cert, cc...) - } - return cert, nil -} - -// chainCert fetches CA certificate chain recursively by following "up" links. -// Each recursive call increments the depth by 1, resulting in an error -// if the recursion level reaches maxChainLen. -// -// First chainCert call starts with depth of 0. -func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { - if depth >= maxChainLen { - return nil, errors.New("acme: certificate chain is too deep") - } - - res, err := c.get(ctx, url, wantStatus(http.StatusOK)) - if err != nil { - return nil, err - } - defer res.Body.Close() - b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) - if err != nil { - return nil, err - } - if len(b) > maxCertSize { - return nil, errors.New("acme: certificate is too big") - } - chain := [][]byte{b} - - uplink := linkHeader(res.Header, "up") - if len(uplink) > maxChainLen { - return nil, errors.New("acme: certificate chain is too large") - } - for _, up := range uplink { - cc, err := c.chainCert(ctx, up, depth+1) - if err != nil { - return nil, err - } - chain = append(chain, cc...) - } - - return chain, nil -} - -// linkHeader returns URI-Reference values of all Link headers -// with relation-type rel. -// See https://tools.ietf.org/html/rfc5988#section-5 for details. -func linkHeader(h http.Header, rel string) []string { - var links []string - for _, v := range h["Link"] { - parts := strings.Split(v, ";") - for _, p := range parts { - p = strings.TrimSpace(p) - if !strings.HasPrefix(p, "rel=") { - continue - } - if v := strings.Trim(p[4:], `"`); v == rel { - links = append(links, strings.Trim(parts[0], "<>")) - } - } - } - return links -} - -// keyAuth generates a key authorization string for a given token. 
-func keyAuth(pub crypto.PublicKey, token string) (string, error) { - th, err := JWKThumbprint(pub) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", token, th), nil -} - -// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. -func defaultTLSChallengeCertTemplate() *x509.Certificate { - return &x509.Certificate{ - SerialNumber: big.NewInt(1), - NotBefore: time.Now(), - NotAfter: time.Now().Add(24 * time.Hour), - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - } -} - -// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges -// with the given SANs and auto-generated public/private key pair. -// The Subject Common Name is set to the first SAN to aid debugging. -// To create a cert with a custom key pair, specify WithKey option. -func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { - var key crypto.Signer - tmpl := defaultTLSChallengeCertTemplate() - for _, o := range opt { - switch o := o.(type) { - case *certOptKey: - if key != nil { - return tls.Certificate{}, errors.New("acme: duplicate key option") - } - key = o.key - case *certOptTemplate: - t := *(*x509.Certificate)(o) // shallow copy is ok - tmpl = &t - default: - // package's fault, if we let this happen: - panic(fmt.Sprintf("unsupported option type %T", o)) - } - } - if key == nil { - var err error - if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { - return tls.Certificate{}, err - } - } - tmpl.DNSNames = san - if len(san) > 0 { - tmpl.Subject.CommonName = san[0] - } - - der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - if err != nil { - return tls.Certificate{}, err - } - return tls.Certificate{ - Certificate: [][]byte{der}, - PrivateKey: key, - }, nil -} - -// encodePEM returns b encoded as PEM with block of type typ. -func encodePEM(typ string, b []byte) []byte { - pb := &pem.Block{Type: typ, Bytes: b} - return pem.EncodeToMemory(pb) -} - -// timeNow is useful for testing for fixed current time. -var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go deleted file mode 100644 index 1a9d972..0000000 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ /dev/null @@ -1,1127 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package autocert provides automatic access to certificates from Let's Encrypt -// and any other ACME-based CA. -// -// This package is a work in progress and makes no API stability promises. -package autocert - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "io" - mathrand "math/rand" - "net" - "net/http" - "path" - "strings" - "sync" - "time" - - "golang.org/x/crypto/acme" -) - -// createCertRetryAfter is how much time to wait before removing a failed state -// entry due to an unsuccessful createCert call. -// This is a variable instead of a const for testing. -// TODO: Consider making it configurable or an exp backoff? -var createCertRetryAfter = time.Minute - -// pseudoRand is safe for concurrent use. 
-var pseudoRand *lockedMathRand - -func init() { - src := mathrand.NewSource(timeNow().UnixNano()) - pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} -} - -// AcceptTOS is a Manager.Prompt function that always returns true to -// indicate acceptance of the CA's Terms of Service during account -// registration. -func AcceptTOS(tosURL string) bool { return true } - -// HostPolicy specifies which host names the Manager is allowed to respond to. -// It returns a non-nil error if the host should be rejected. -// The returned error is accessible via tls.Conn.Handshake and its callers. -// See Manager's HostPolicy field and GetCertificate method docs for more details. -type HostPolicy func(ctx context.Context, host string) error - -// HostWhitelist returns a policy where only the specified host names are allowed. -// Only exact matches are currently supported. Subdomains, regexp or wildcard -// will not match. -func HostWhitelist(hosts ...string) HostPolicy { - whitelist := make(map[string]bool, len(hosts)) - for _, h := range hosts { - whitelist[h] = true - } - return func(_ context.Context, host string) error { - if !whitelist[host] { - return errors.New("acme/autocert: host not configured") - } - return nil - } -} - -// defaultHostPolicy is used when Manager.HostPolicy is not set. -func defaultHostPolicy(context.Context, string) error { - return nil -} - -// Manager is a stateful certificate manager built on top of acme.Client. -// It obtains and refreshes certificates automatically using "tls-alpn-01", -// "tls-sni-01", "tls-sni-02" and "http-01" challenge types, -// as well as providing them to a TLS server via tls.Config. -// -// You must specify a cache implementation, such as DirCache, -// to reuse obtained certificates across program restarts. -// Otherwise your server is very likely to exceed the certificate -// issuer's request rate limits. -type Manager struct { - // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). - // The registration may require the caller to agree to the CA's TOS. - // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report - // whether the caller agrees to the terms. - // - // To always accept the terms, the callers can use AcceptTOS. - Prompt func(tosURL string) bool - - // Cache optionally stores and retrieves previously-obtained certificates - // and other state. If nil, certs will only be cached for the lifetime of - // the Manager. Multiple Managers can share the same Cache. - // - // Using a persistent Cache, such as DirCache, is strongly recommended. - Cache Cache - - // HostPolicy controls which domains the Manager will attempt - // to retrieve new certificates for. It does not affect cached certs. - // - // If non-nil, HostPolicy is called before requesting a new cert. - // If nil, all hosts are currently allowed. This is not recommended, - // as it opens a potential attack where clients connect to a server - // by IP address and pretend to be asking for an incorrect host name. - // Manager will attempt to obtain a certificate for that host, incorrectly, - // eventually reaching the CA's rate limit for certificate requests - // and making it impossible to obtain actual certificates. - // - // See GetCertificate for more details. - HostPolicy HostPolicy - - // RenewBefore optionally specifies how early certificates should - // be renewed before they expire. - // - // If zero, they're renewed 30 days before expiration. 
- RenewBefore time.Duration - - // Client is used to perform low-level operations, such as account registration - // and requesting new certificates. - // - // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL - // as directory endpoint. If the Client.Key is nil, a new ECDSA P-256 key is - // generated and, if Cache is not nil, stored in cache. - // - // Mutating the field after the first call of GetCertificate method will have no effect. - Client *acme.Client - - // Email optionally specifies a contact email address. - // This is used by CAs, such as Let's Encrypt, to notify about problems - // with issued certificates. - // - // If the Client's account key is already registered, Email is not used. - Email string - - // ForceRSA used to make the Manager generate RSA certificates. It is now ignored. - // - // Deprecated: the Manager will request the correct type of certificate based - // on what each client supports. - ForceRSA bool - - // ExtraExtensions are used when generating a new CSR (Certificate Request), - // thus allowing customization of the resulting certificate. - // For instance, TLS Feature Extension (RFC 7633) can be used - // to prevent an OCSP downgrade attack. - // - // The field value is passed to crypto/x509.CreateCertificateRequest - // in the template's ExtraExtensions field as is. - ExtraExtensions []pkix.Extension - - clientMu sync.Mutex - client *acme.Client // initialized by acmeClient method - - stateMu sync.Mutex - state map[certKey]*certState - - // renewal tracks the set of domains currently running renewal timers. - renewalMu sync.Mutex - renewal map[certKey]*domainRenewal - - // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. - tokensMu sync.RWMutex - // tryHTTP01 indicates whether the Manager should try "http-01" challenge type - // during the authorization flow. - tryHTTP01 bool - // httpTokens contains response body values for http-01 challenges - // and is keyed by the URL path at which a challenge response is expected - // to be provisioned. - // The entries are stored for the duration of the authorization flow. - httpTokens map[string][]byte - // certTokens contains temporary certificates for tls-sni and tls-alpn challenges - // and is keyed by token domain name, which matches server name of ClientHello. - // Keys always have ".acme.invalid" suffix for tls-sni. Otherwise, they are domain names - // for tls-alpn. - // The entries are stored for the duration of the authorization flow. - certTokens map[string]*tls.Certificate -} - -// certKey is the key by which certificates are tracked in state, renewal and cache. -type certKey struct { - domain string // without trailing dot - isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA) - isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA -} - -func (c certKey) String() string { - if c.isToken { - return c.domain + "+token" - } - if c.isRSA { - return c.domain + "+rsa" - } - return c.domain -} - -// TLSConfig creates a new TLS config suitable for net/http.Server servers, -// supporting HTTP/2 and the tls-alpn-01 ACME challenge type. -func (m *Manager) TLSConfig() *tls.Config { - return &tls.Config{ - GetCertificate: m.GetCertificate, - NextProtos: []string{ - "h2", "http/1.1", // enable HTTP/2 - acme.ALPNProto, // enable tls-alpn ACME challenges - }, - } -} - -// GetCertificate implements the tls.Config.GetCertificate hook. 
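A typical way these Manager pieces fit together might look like the sketch below; the host names and cache directory are placeholders:

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		Cache:      autocert.DirCache("/var/cache/autocert"), // reuse certs across restarts
		HostPolicy: autocert.HostWhitelist("example.com", "www.example.com"),
	}
	srv := &http.Server{
		Addr:      ":443",
		TLSConfig: m.TLSConfig(), // GetCertificate hook plus tls-alpn-01 support
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello, TLS"))
		}),
	}
	// Certificates come from GetCertificate, so no cert/key files are passed here.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}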
-// It provides a TLS certificate for hello.ServerName host, including answering -// tls-alpn-01 and *.acme.invalid (tls-sni-01 and tls-sni-02) challenges. -// All other fields of hello are ignored. -// -// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting -// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. -// The error is propagated back to the caller of GetCertificate and is user-visible. -// This does not affect cached certs. See HostPolicy field description for more details. -func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - if m.Prompt == nil { - return nil, errors.New("acme/autocert: Manager.Prompt not set") - } - - name := hello.ServerName - if name == "" { - return nil, errors.New("acme/autocert: missing server name") - } - if !strings.Contains(strings.Trim(name, "."), ".") { - return nil, errors.New("acme/autocert: server name component count invalid") - } - if strings.ContainsAny(name, `+/\`) { - return nil, errors.New("acme/autocert: server name contains invalid character") - } - - // In the worst-case scenario, the timeout needs to account for caching, host policy, - // domain ownership verification and certificate issuance. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - // Check whether this is a token cert requested for TLS-SNI or TLS-ALPN challenge. - if wantsTokenCert(hello) { - m.tokensMu.RLock() - defer m.tokensMu.RUnlock() - // It's ok to use the same token cert key for both tls-sni and tls-alpn - // because there's always at most 1 token cert per on-going domain authorization. - // See m.verify for details. - if cert := m.certTokens[name]; cert != nil { - return cert, nil - } - if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil { - return cert, nil - } - // TODO: cache error results? - return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) - } - - // regular domain - ck := certKey{ - domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 - isRSA: !supportsECDSA(hello), - } - cert, err := m.cert(ctx, ck) - if err == nil { - return cert, nil - } - if err != ErrCacheMiss { - return nil, err - } - - // first-time - if err := m.hostPolicy()(ctx, name); err != nil { - return nil, err - } - cert, err = m.createCert(ctx, ck) - if err != nil { - return nil, err - } - m.cachePut(ctx, ck, cert) - return cert, nil -} - -// wantsTokenCert reports whether a TLS request with SNI is made by a CA server -// for a challenge verification. -func wantsTokenCert(hello *tls.ClientHelloInfo) bool { - // tls-alpn-01 - if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto { - return true - } - // tls-sni-xx - return strings.HasSuffix(hello.ServerName, ".acme.invalid") -} - -func supportsECDSA(hello *tls.ClientHelloInfo) bool { - // The "signature_algorithms" extension, if present, limits the key exchange - // algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1. 
- if hello.SignatureSchemes != nil { - ecdsaOK := false - schemeLoop: - for _, scheme := range hello.SignatureSchemes { - const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10 - switch scheme { - case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256, - tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512: - ecdsaOK = true - break schemeLoop - } - } - if !ecdsaOK { - return false - } - } - if hello.SupportedCurves != nil { - ecdsaOK := false - for _, curve := range hello.SupportedCurves { - if curve == tls.CurveP256 { - ecdsaOK = true - break - } - } - if !ecdsaOK { - return false - } - } - for _, suite := range hello.CipherSuites { - switch suite { - case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: - return true - } - } - return false -} - -// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. -// It returns an http.Handler that responds to the challenges and must be -// running on port 80. If it receives a request that is not an ACME challenge, -// it delegates the request to the optional fallback handler. -// -// If fallback is nil, the returned handler redirects all GET and HEAD requests -// to the default TLS port 443 with 302 Found status code, preserving the original -// request path and query. It responds with 400 Bad Request to all other HTTP methods. -// The fallback is not protected by the optional HostPolicy. -// -// Because the fallback handler is run with unencrypted port 80 requests, -// the fallback should not serve TLS-only requests. -// -// If HTTPHandler is never called, the Manager will only use TLS SNI -// challenges for domain verification. -func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - m.tryHTTP01 = true - - if fallback == nil { - fallback = http.HandlerFunc(handleHTTPRedirect) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { - fallback.ServeHTTP(w, r) - return - } - // A reasonable context timeout for cache and host policy only, - // because we don't wait for a new certificate issuance here. - ctx, cancel := context.WithTimeout(r.Context(), time.Minute) - defer cancel() - if err := m.hostPolicy()(ctx, r.Host); err != nil { - http.Error(w, err.Error(), http.StatusForbidden) - return - } - data, err := m.httpToken(ctx, r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.Write(data) - }) -} - -func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" && r.Method != "HEAD" { - http.Error(w, "Use HTTPS", http.StatusBadRequest) - return - } - target := "https://" + stripPort(r.Host) + r.URL.RequestURI() - http.Redirect(w, r, target, http.StatusFound) -} - -func stripPort(hostport string) string { - host, _, err := net.SplitHostPort(hostport) - if err != nil { - return hostport - } - return net.JoinHostPort(host, "443") -} - -// cert returns an existing certificate either from m.state or cache. -// If a certificate is found in cache but not in m.state, the latter will be filled -// with the cached value. 
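HTTPHandler is normally paired with a plain-HTTP listener on port 80; a short sketch, with the nil fallback selecting the default redirect-to-HTTPS behaviour:

package autocertexample

import (
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

// serveHTTP01 answers "/.well-known/acme-challenge/" requests for m on port 80
// and redirects every other GET/HEAD request to HTTPS (the nil fallback).
func serveHTTP01(m *autocert.Manager) error {
	return http.ListenAndServe(":80", m.HTTPHandler(nil))
}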
-func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) { - m.stateMu.Lock() - if s, ok := m.state[ck]; ok { - m.stateMu.Unlock() - s.RLock() - defer s.RUnlock() - return s.tlscert() - } - defer m.stateMu.Unlock() - cert, err := m.cacheGet(ctx, ck) - if err != nil { - return nil, err - } - signer, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return nil, errors.New("acme/autocert: private key cannot sign") - } - if m.state == nil { - m.state = make(map[certKey]*certState) - } - s := &certState{ - key: signer, - cert: cert.Certificate, - leaf: cert.Leaf, - } - m.state[ck] = s - go m.renew(ck, s.key, s.leaf.NotAfter) - return cert, nil -} - -// cacheGet always returns a valid certificate, or an error otherwise. -// If a cached certificate exists but is not valid, ErrCacheMiss is returned. -func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) { - if m.Cache == nil { - return nil, ErrCacheMiss - } - data, err := m.Cache.Get(ctx, ck.String()) - if err != nil { - return nil, err - } - - // private - priv, pub := pem.Decode(data) - if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { - return nil, ErrCacheMiss - } - privKey, err := parsePrivateKey(priv.Bytes) - if err != nil { - return nil, err - } - - // public - var pubDER [][]byte - for len(pub) > 0 { - var b *pem.Block - b, pub = pem.Decode(pub) - if b == nil { - break - } - pubDER = append(pubDER, b.Bytes) - } - if len(pub) > 0 { - // Leftover content not consumed by pem.Decode. Corrupt. Ignore. - return nil, ErrCacheMiss - } - - // verify and create TLS cert - leaf, err := validCert(ck, pubDER, privKey) - if err != nil { - return nil, ErrCacheMiss - } - tlscert := &tls.Certificate{ - Certificate: pubDER, - PrivateKey: privKey, - Leaf: leaf, - } - return tlscert, nil -} - -func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error { - if m.Cache == nil { - return nil - } - - // contains PEM-encoded data - var buf bytes.Buffer - - // private - switch key := tlscert.PrivateKey.(type) { - case *ecdsa.PrivateKey: - if err := encodeECDSAKey(&buf, key); err != nil { - return err - } - case *rsa.PrivateKey: - b := x509.MarshalPKCS1PrivateKey(key) - pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return err - } - default: - return errors.New("acme/autocert: unknown private key type") - } - - // public - for _, b := range tlscert.Certificate { - pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return err - } - } - - return m.Cache.Put(ctx, ck.String(), buf.Bytes()) -} - -func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { - b, err := x509.MarshalECPrivateKey(key) - if err != nil { - return err - } - pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - return pem.Encode(w, pb) -} - -// createCert starts the domain ownership verification and returns a certificate -// for that domain upon success. -// -// If the domain is already being verified, it waits for the existing verification to complete. -// Either way, createCert blocks for the duration of the whole process. 
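The cache entry layout written by cachePut (private key PEM block first, certificate chain after it) can also be read back outside the Manager; a sketch under that assumption, not part of the autocert API:

package autocertexample

import (
	"crypto/tls"
	"encoding/pem"
	"errors"
)

// parseCachedCert splits a cache entry laid out as key-then-chain and loads it
// as a tls.Certificate.
func parseCachedCert(data []byte) (tls.Certificate, error) {
	keyBlock, rest := pem.Decode(data)
	if keyBlock == nil {
		return tls.Certificate{}, errors.New("no PEM data in cache entry")
	}
	// rest holds the certificate PEM blocks; re-encode the key block for X509KeyPair.
	return tls.X509KeyPair(rest, pem.EncodeToMemory(keyBlock))
}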
-func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) { - // TODO: maybe rewrite this whole piece using sync.Once - state, err := m.certState(ck) - if err != nil { - return nil, err - } - // state may exist if another goroutine is already working on it - // in which case just wait for it to finish - if !state.locked { - state.RLock() - defer state.RUnlock() - return state.tlscert() - } - - // We are the first; state is locked. - // Unblock the readers when domain ownership is verified - // and we got the cert or the process failed. - defer state.Unlock() - state.locked = false - - der, leaf, err := m.authorizedCert(ctx, state.key, ck) - if err != nil { - // Remove the failed state after some time, - // making the manager call createCert again on the following TLS hello. - time.AfterFunc(createCertRetryAfter, func() { - defer testDidRemoveState(ck) - m.stateMu.Lock() - defer m.stateMu.Unlock() - // Verify the state hasn't changed and it's still invalid - // before deleting. - s, ok := m.state[ck] - if !ok { - return - } - if _, err := validCert(ck, s.cert, s.key); err == nil { - return - } - delete(m.state, ck) - }) - return nil, err - } - state.cert = der - state.leaf = leaf - go m.renew(ck, state.key, state.leaf.NotAfter) - return state.tlscert() -} - -// certState returns a new or existing certState. -// If a new certState is returned, state.exist is false and the state is locked. -// The returned error is non-nil only in the case where a new state could not be created. -func (m *Manager) certState(ck certKey) (*certState, error) { - m.stateMu.Lock() - defer m.stateMu.Unlock() - if m.state == nil { - m.state = make(map[certKey]*certState) - } - // existing state - if state, ok := m.state[ck]; ok { - return state, nil - } - - // new locked state - var ( - err error - key crypto.Signer - ) - if ck.isRSA { - key, err = rsa.GenerateKey(rand.Reader, 2048) - } else { - key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - } - if err != nil { - return nil, err - } - - state := &certState{ - key: key, - locked: true, - } - state.Lock() // will be unlocked by m.certState caller - m.state[ck] = state - return state, nil -} - -// authorizedCert starts the domain ownership verification process and requests a new cert upon success. -// The key argument is the certificate private key. -func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) { - client, err := m.acmeClient(ctx) - if err != nil { - return nil, nil, err - } - - if err := m.verify(ctx, client, ck.domain); err != nil { - return nil, nil, err - } - csr, err := certRequest(key, ck.domain, m.ExtraExtensions) - if err != nil { - return nil, nil, err - } - der, _, err = client.CreateCert(ctx, csr, 0, true) - if err != nil { - return nil, nil, err - } - leaf, err = validCert(ck, der, key) - if err != nil { - return nil, nil, err - } - return der, leaf, nil -} - -// revokePendingAuthz revokes all authorizations idenfied by the elements of uri slice. -// It ignores revocation errors. -func (m *Manager) revokePendingAuthz(ctx context.Context, uri []string) { - client, err := m.acmeClient(ctx) - if err != nil { - return - } - for _, u := range uri { - client.RevokeAuthorization(ctx, u) - } -} - -// verify runs the identifier (domain) authorization flow -// using each applicable ACME challenge type. 
-func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { - // The list of challenge types we'll try to fulfill - // in this specific order. - challengeTypes := []string{"tls-alpn-01", "tls-sni-02", "tls-sni-01"} - m.tokensMu.RLock() - if m.tryHTTP01 { - challengeTypes = append(challengeTypes, "http-01") - } - m.tokensMu.RUnlock() - - // Keep track of pending authzs and revoke the ones that did not validate. - pendingAuthzs := make(map[string]bool) - defer func() { - var uri []string - for k, pending := range pendingAuthzs { - if pending { - uri = append(uri, k) - } - } - if len(uri) > 0 { - // Use "detached" background context. - // The revocations need not happen in the current verification flow. - go m.revokePendingAuthz(context.Background(), uri) - } - }() - - // errs accumulates challenge failure errors, printed if all fail - errs := make(map[*acme.Challenge]error) - var nextTyp int // challengeType index of the next challenge type to try - for { - // Start domain authorization and get the challenge. - authz, err := client.Authorize(ctx, domain) - if err != nil { - return err - } - // No point in accepting challenges if the authorization status - // is in a final state. - switch authz.Status { - case acme.StatusValid: - return nil // already authorized - case acme.StatusInvalid: - return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) - } - - pendingAuthzs[authz.URI] = true - - // Pick the next preferred challenge. - var chal *acme.Challenge - for chal == nil && nextTyp < len(challengeTypes) { - chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) - nextTyp++ - } - if chal == nil { - errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain) - for chal, err := range errs { - errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err) - } - return errors.New(errorMsg) - } - cleanup, err := m.fulfill(ctx, client, chal, domain) - if err != nil { - errs[chal] = err - continue - } - defer cleanup() - if _, err := client.Accept(ctx, chal); err != nil { - errs[chal] = err - continue - } - - // A challenge is fulfilled and accepted: wait for the CA to validate. - if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil { - errs[chal] = err - continue - } - delete(pendingAuthzs, authz.URI) - return nil - } -} - -// fulfill provisions a response to the challenge chal. -// The cleanup is non-nil only if provisioning succeeded. 
-func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) { - switch chal.Type { - case "tls-alpn-01": - cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain) - if err != nil { - return nil, err - } - m.putCertToken(ctx, domain, &cert) - return func() { go m.deleteCertToken(domain) }, nil - case "tls-sni-01": - cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) - if err != nil { - return nil, err - } - m.putCertToken(ctx, name, &cert) - return func() { go m.deleteCertToken(name) }, nil - case "tls-sni-02": - cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) - if err != nil { - return nil, err - } - m.putCertToken(ctx, name, &cert) - return func() { go m.deleteCertToken(name) }, nil - case "http-01": - resp, err := client.HTTP01ChallengeResponse(chal.Token) - if err != nil { - return nil, err - } - p := client.HTTP01ChallengePath(chal.Token) - m.putHTTPToken(ctx, p, resp) - return func() { go m.deleteHTTPToken(p) }, nil - } - return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) -} - -func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { - for _, c := range chal { - if c.Type == typ { - return c - } - } - return nil -} - -// putCertToken stores the token certificate with the specified name -// in both m.certTokens map and m.Cache. -func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - if m.certTokens == nil { - m.certTokens = make(map[string]*tls.Certificate) - } - m.certTokens[name] = cert - m.cachePut(ctx, certKey{domain: name, isToken: true}, cert) -} - -// deleteCertToken removes the token certificate with the specified name -// from both m.certTokens map and m.Cache. -func (m *Manager) deleteCertToken(name string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - delete(m.certTokens, name) - if m.Cache != nil { - ck := certKey{domain: name, isToken: true} - m.Cache.Delete(context.Background(), ck.String()) - } -} - -// httpToken retrieves an existing http-01 token value from an in-memory map -// or the optional cache. -func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { - m.tokensMu.RLock() - defer m.tokensMu.RUnlock() - if v, ok := m.httpTokens[tokenPath]; ok { - return v, nil - } - if m.Cache == nil { - return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) - } - return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) -} - -// putHTTPToken stores an http-01 token value using tokenPath as key -// in both in-memory map and the optional Cache. -// -// It ignores any error returned from Cache.Put. -func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - if m.httpTokens == nil { - m.httpTokens = make(map[string][]byte) - } - b := []byte(val) - m.httpTokens[tokenPath] = b - if m.Cache != nil { - m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) - } -} - -// deleteHTTPToken removes an http-01 token value from both in-memory map -// and the optional Cache, ignoring any error returned from the latter. -// -// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. 
-func (m *Manager) deleteHTTPToken(tokenPath string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - delete(m.httpTokens, tokenPath) - if m.Cache != nil { - m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) - } -} - -// httpTokenCacheKey returns a key at which an http-01 token value may be stored -// in the Manager's optional Cache. -func httpTokenCacheKey(tokenPath string) string { - return path.Base(tokenPath) + "+http-01" -} - -// renew starts a cert renewal timer loop, one per domain. -// -// The loop is scheduled in two cases: -// - a cert was fetched from cache for the first time (wasn't in m.state) -// - a new cert was created by m.createCert -// -// The key argument is a certificate private key. -// The exp argument is the cert expiration time (NotAfter). -func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) { - m.renewalMu.Lock() - defer m.renewalMu.Unlock() - if m.renewal[ck] != nil { - // another goroutine is already on it - return - } - if m.renewal == nil { - m.renewal = make(map[certKey]*domainRenewal) - } - dr := &domainRenewal{m: m, ck: ck, key: key} - m.renewal[ck] = dr - dr.start(exp) -} - -// stopRenew stops all currently running cert renewal timers. -// The timers are not restarted during the lifetime of the Manager. -func (m *Manager) stopRenew() { - m.renewalMu.Lock() - defer m.renewalMu.Unlock() - for name, dr := range m.renewal { - delete(m.renewal, name) - dr.stop() - } -} - -func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { - const keyName = "acme_account+key" - - // Previous versions of autocert stored the value under a different key. - const legacyKeyName = "acme_account.key" - - genKey := func() (*ecdsa.PrivateKey, error) { - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - } - - if m.Cache == nil { - return genKey() - } - - data, err := m.Cache.Get(ctx, keyName) - if err == ErrCacheMiss { - data, err = m.Cache.Get(ctx, legacyKeyName) - } - if err == ErrCacheMiss { - key, err := genKey() - if err != nil { - return nil, err - } - var buf bytes.Buffer - if err := encodeECDSAKey(&buf, key); err != nil { - return nil, err - } - if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { - return nil, err - } - return key, nil - } - if err != nil { - return nil, err - } - - priv, _ := pem.Decode(data) - if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { - return nil, errors.New("acme/autocert: invalid account key found in cache") - } - return parsePrivateKey(priv.Bytes) -} - -func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { - m.clientMu.Lock() - defer m.clientMu.Unlock() - if m.client != nil { - return m.client, nil - } - - client := m.Client - if client == nil { - client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} - } - if client.Key == nil { - var err error - client.Key, err = m.accountKey(ctx) - if err != nil { - return nil, err - } - } - var contact []string - if m.Email != "" { - contact = []string{"mailto:" + m.Email} - } - a := &acme.Account{Contact: contact} - _, err := client.Register(ctx, a, m.Prompt) - if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { - // conflict indicates the key is already registered - m.client = client - err = nil - } - return m.client, err -} - -func (m *Manager) hostPolicy() HostPolicy { - if m.HostPolicy != nil { - return m.HostPolicy - } - return defaultHostPolicy -} - -func (m *Manager) renewBefore() time.Duration { - if m.RenewBefore > renewJitter { - return m.RenewBefore - } 
- return 720 * time.Hour // 30 days -} - -// certState is ready when its mutex is unlocked for reading. -type certState struct { - sync.RWMutex - locked bool // locked for read/write - key crypto.Signer // private key for cert - cert [][]byte // DER encoding - leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil -} - -// tlscert creates a tls.Certificate from s.key and s.cert. -// Callers should wrap it in s.RLock() and s.RUnlock(). -func (s *certState) tlscert() (*tls.Certificate, error) { - if s.key == nil { - return nil, errors.New("acme/autocert: missing signer") - } - if len(s.cert) == 0 { - return nil, errors.New("acme/autocert: missing certificate") - } - return &tls.Certificate{ - PrivateKey: s.key, - Certificate: s.cert, - Leaf: s.leaf, - }, nil -} - -// certRequest generates a CSR for the given common name cn and optional SANs. -func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { - req := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: cn}, - DNSNames: san, - ExtraExtensions: ext, - } - return x509.CreateCertificateRequest(rand.Reader, req, key) -} - -// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates -// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. -// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. -// -// Inspired by parsePrivateKey in crypto/tls/tls.go. -func parsePrivateKey(der []byte) (crypto.Signer, error) { - if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { - return key, nil - } - if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey: - return key, nil - case *ecdsa.PrivateKey: - return key, nil - default: - return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") - } - } - if key, err := x509.ParseECPrivateKey(der); err == nil { - return key, nil - } - - return nil, errors.New("acme/autocert: failed to parse private key") -} - -// validCert parses a cert chain provided as der argument and verifies the leaf and der[0] -// correspond to the private key, the domain and key type match, and expiration dates -// are valid. It doesn't do any revocation checking. -// -// The returned value is the verified leaf cert. 
-func validCert(ck certKey, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) { - // parse public part(s) - var n int - for _, b := range der { - n += len(b) - } - pub := make([]byte, n) - n = 0 - for _, b := range der { - n += copy(pub[n:], b) - } - x509Cert, err := x509.ParseCertificates(pub) - if err != nil || len(x509Cert) == 0 { - return nil, errors.New("acme/autocert: no public key found") - } - // verify the leaf is not expired and matches the domain name - leaf = x509Cert[0] - now := timeNow() - if now.Before(leaf.NotBefore) { - return nil, errors.New("acme/autocert: certificate is not valid yet") - } - if now.After(leaf.NotAfter) { - return nil, errors.New("acme/autocert: expired certificate") - } - if err := leaf.VerifyHostname(ck.domain); err != nil { - return nil, err - } - // ensure the leaf corresponds to the private key and matches the certKey type - switch pub := leaf.PublicKey.(type) { - case *rsa.PublicKey: - prv, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("acme/autocert: private key type does not match public key type") - } - if pub.N.Cmp(prv.N) != 0 { - return nil, errors.New("acme/autocert: private key does not match public key") - } - if !ck.isRSA && !ck.isToken { - return nil, errors.New("acme/autocert: key type does not match expected value") - } - case *ecdsa.PublicKey: - prv, ok := key.(*ecdsa.PrivateKey) - if !ok { - return nil, errors.New("acme/autocert: private key type does not match public key type") - } - if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { - return nil, errors.New("acme/autocert: private key does not match public key") - } - if ck.isRSA && !ck.isToken { - return nil, errors.New("acme/autocert: key type does not match expected value") - } - default: - return nil, errors.New("acme/autocert: unknown public key algorithm") - } - return leaf, nil -} - -type lockedMathRand struct { - sync.Mutex - rnd *mathrand.Rand -} - -func (r *lockedMathRand) int63n(max int64) int64 { - r.Lock() - n := r.rnd.Int63n(max) - r.Unlock() - return n -} - -// For easier testing. -var ( - timeNow = time.Now - - // Called when a state is removed. - testDidRemoveState = func(certKey) {} -) diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go deleted file mode 100644 index aa9aa84..0000000 --- a/vendor/golang.org/x/crypto/acme/autocert/cache.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package autocert - -import ( - "context" - "errors" - "io/ioutil" - "os" - "path/filepath" -) - -// ErrCacheMiss is returned when a certificate is not found in cache. -var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") - -// Cache is used by Manager to store and retrieve previously obtained certificates -// and other account data as opaque blobs. -// -// Cache implementations should not rely on the key naming pattern. Keys can -// include any printable ASCII characters, except the following: \/:*?"<>| -type Cache interface { - // Get returns a certificate data for the specified key. - // If there's no such key, Get returns ErrCacheMiss. - Get(ctx context.Context, key string) ([]byte, error) - - // Put stores the data in the cache under the specified key. - // Underlying implementations may use any data storage format, - // as long as the reverse operation, Get, results in the original data. 
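Besides DirCache, any type satisfying this interface can back a Manager; a sketch of a test-only in-memory cache, with the type name being an assumption of the example:

package autocertexample

import (
	"context"
	"sync"

	"golang.org/x/crypto/acme/autocert"
)

// memCache is a trivial in-memory autocert.Cache; it loses state on restart,
// so it is only suitable for tests.
type memCache struct {
	mu sync.Mutex
	m  map[string][]byte
}

func (c *memCache) Get(ctx context.Context, key string) ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if data, ok := c.m[key]; ok {
		return data, nil
	}
	return nil, autocert.ErrCacheMiss
}

func (c *memCache) Put(ctx context.Context, key string, data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[string][]byte)
	}
	c.m[key] = data
	return nil
}

func (c *memCache) Delete(ctx context.Context, key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.m, key)
	return nil
}

// Compile-time check that memCache implements autocert.Cache.
var _ autocert.Cache = (*memCache)(nil)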
- Put(ctx context.Context, key string, data []byte) error - - // Delete removes a certificate data from the cache under the specified key. - // If there's no such key in the cache, Delete returns nil. - Delete(ctx context.Context, key string) error -} - -// DirCache implements Cache using a directory on the local filesystem. -// If the directory does not exist, it will be created with 0700 permissions. -type DirCache string - -// Get reads a certificate data from the specified file name. -func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { - name = filepath.Join(string(d), name) - var ( - data []byte - err error - done = make(chan struct{}) - ) - go func() { - data, err = ioutil.ReadFile(name) - close(done) - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-done: - } - if os.IsNotExist(err) { - return nil, ErrCacheMiss - } - return data, err -} - -// Put writes the certificate data to the specified file name. -// The file will be created with 0600 permissions. -func (d DirCache) Put(ctx context.Context, name string, data []byte) error { - if err := os.MkdirAll(string(d), 0700); err != nil { - return err - } - - done := make(chan struct{}) - var err error - go func() { - defer close(done) - var tmp string - if tmp, err = d.writeTempFile(name, data); err != nil { - return - } - select { - case <-ctx.Done(): - // Don't overwrite the file if the context was canceled. - default: - newName := filepath.Join(string(d), name) - err = os.Rename(tmp, newName) - } - }() - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - } - return err -} - -// Delete removes the specified file name. -func (d DirCache) Delete(ctx context.Context, name string) error { - name = filepath.Join(string(d), name) - var ( - err error - done = make(chan struct{}) - ) - go func() { - err = os.Remove(name) - close(done) - }() - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - } - if err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// writeTempFile writes b to a temporary file, closes the file and returns its path. -func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { - // TempFile uses 0600 permissions - f, err := ioutil.TempFile(string(d), prefix) - if err != nil { - return "", err - } - if _, err := f.Write(b); err != nil { - f.Close() - return "", err - } - return f.Name(), f.Close() -} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go deleted file mode 100644 index 1e06981..0000000 --- a/vendor/golang.org/x/crypto/acme/autocert/listener.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package autocert - -import ( - "crypto/tls" - "log" - "net" - "os" - "path/filepath" - "runtime" - "time" -) - -// NewListener returns a net.Listener that listens on the standard TLS -// port (443) on all interfaces and returns *tls.Conn connections with -// LetsEncrypt certificates for the provided domain or domains. -// -// It enables one-line HTTPS servers: -// -// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) -// -// NewListener is a convenience function for a common configuration. -// More complex or custom configurations can use the autocert.Manager -// type instead. -// -// Use of this function implies acceptance of the LetsEncrypt Terms of -// Service. 
If domains is not empty, the provided domains are passed -// to HostWhitelist. If domains is empty, the listener will do -// LetsEncrypt challenges for any requested domain, which is not -// recommended. -// -// Certificates are cached in a "golang-autocert" directory under an -// operating system-specific cache or temp directory. This may not -// be suitable for servers spanning multiple machines. -// -// The returned listener uses a *tls.Config that enables HTTP/2, and -// should only be used with servers that support HTTP/2. -// -// The returned Listener also enables TCP keep-alives on the accepted -// connections. The returned *tls.Conn are returned before their TLS -// handshake has completed. -func NewListener(domains ...string) net.Listener { - m := &Manager{ - Prompt: AcceptTOS, - } - if len(domains) > 0 { - m.HostPolicy = HostWhitelist(domains...) - } - dir := cacheDir() - if err := os.MkdirAll(dir, 0700); err != nil { - log.Printf("warning: autocert.NewListener not using a cache: %v", err) - } else { - m.Cache = DirCache(dir) - } - return m.Listener() -} - -// Listener listens on the standard TLS port (443) on all interfaces -// and returns a net.Listener returning *tls.Conn connections. -// -// The returned listener uses a *tls.Config that enables HTTP/2, and -// should only be used with servers that support HTTP/2. -// -// The returned Listener also enables TCP keep-alives on the accepted -// connections. The returned *tls.Conn are returned before their TLS -// handshake has completed. -// -// Unlike NewListener, it is the caller's responsibility to initialize -// the Manager m's Prompt, Cache, HostPolicy, and other desired options. -func (m *Manager) Listener() net.Listener { - ln := &listener{ - m: m, - conf: m.TLSConfig(), - } - ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") - return ln -} - -type listener struct { - m *Manager - conf *tls.Config - - tcpListener net.Listener - tcpListenErr error -} - -func (ln *listener) Accept() (net.Conn, error) { - if ln.tcpListenErr != nil { - return nil, ln.tcpListenErr - } - conn, err := ln.tcpListener.Accept() - if err != nil { - return nil, err - } - tcpConn := conn.(*net.TCPConn) - - // Because Listener is a convenience function, help out with - // this too. This is not possible for the caller to set once - // we return a *tcp.Conn wrapping an inaccessible net.Conn. - // If callers don't want this, they can do things the manual - // way and tweak as needed. But this is what net/http does - // itself, so copy that. If net/http changes, we can change - // here too. - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(3 * time.Minute) - - return tls.Server(tcpConn, ln.conf), nil -} - -func (ln *listener) Addr() net.Addr { - if ln.tcpListener != nil { - return ln.tcpListener.Addr() - } - // net.Listen failed. 
Return something non-nil in case callers - // call Addr before Accept: - return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} -} - -func (ln *listener) Close() error { - if ln.tcpListenErr != nil { - return ln.tcpListenErr - } - return ln.tcpListener.Close() -} - -func homeDir() string { - if runtime.GOOS == "windows" { - return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - } - if h := os.Getenv("HOME"); h != "" { - return h - } - return "/" -} - -func cacheDir() string { - const base = "golang-autocert" - switch runtime.GOOS { - case "darwin": - return filepath.Join(homeDir(), "Library", "Caches", base) - case "windows": - for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { - if v := os.Getenv(ev); v != "" { - return filepath.Join(v, base) - } - } - // Worst case: - return filepath.Join(homeDir(), base) - } - if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { - return filepath.Join(xdg, base) - } - return filepath.Join(homeDir(), ".cache", base) -} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go deleted file mode 100644 index ef3e44e..0000000 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package autocert - -import ( - "context" - "crypto" - "sync" - "time" -) - -// renewJitter is the maximum deviation from Manager.RenewBefore. -const renewJitter = time.Hour - -// domainRenewal tracks the state used by the periodic timers -// renewing a single domain's cert. -type domainRenewal struct { - m *Manager - ck certKey - key crypto.Signer - - timerMu sync.Mutex - timer *time.Timer -} - -// start starts a cert renewal timer at the time -// defined by the certificate expiration time exp. -// -// If the timer is already started, calling start is a noop. -func (dr *domainRenewal) start(exp time.Time) { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer != nil { - return - } - dr.timer = time.AfterFunc(dr.next(exp), dr.renew) -} - -// stop stops the cert renewal timer. -// If the timer is already stopped, calling stop is a noop. -func (dr *domainRenewal) stop() { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer == nil { - return - } - dr.timer.Stop() - dr.timer = nil -} - -// renew is called periodically by a timer. -// The first renew call is kicked off by dr.start. -func (dr *domainRenewal) renew() { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer == nil { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - // TODO: rotate dr.key at some point? - next, err := dr.do(ctx) - if err != nil { - next = renewJitter / 2 - next += time.Duration(pseudoRand.int63n(int64(next))) - } - dr.timer = time.AfterFunc(next, dr.renew) - testDidRenewLoop(next, err) -} - -// updateState locks and replaces the relevant Manager.state item with the given -// state. It additionally updates dr.key with the given state's key. -func (dr *domainRenewal) updateState(state *certState) { - dr.m.stateMu.Lock() - defer dr.m.stateMu.Unlock() - dr.key = state.key - dr.m.state[dr.ck] = state -} - -// do is similar to Manager.createCert but it doesn't lock a Manager.state item. -// Instead, it requests a new certificate independently and, upon success, -// replaces dr.m.state item with a new one and updates cache for the given domain. 
-// -// It may lock and update the Manager.state if the expiration date of the currently -// cached cert is far enough in the future. -// -// The returned value is a time interval after which the renewal should occur again. -func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { - // a race is likely unavoidable in a distributed environment - // but we try nonetheless - if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { - next := dr.next(tlscert.Leaf.NotAfter) - if next > dr.m.renewBefore()+renewJitter { - signer, ok := tlscert.PrivateKey.(crypto.Signer) - if ok { - state := &certState{ - key: signer, - cert: tlscert.Certificate, - leaf: tlscert.Leaf, - } - dr.updateState(state) - return next, nil - } - } - } - - der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) - if err != nil { - return 0, err - } - state := &certState{ - key: dr.key, - cert: der, - leaf: leaf, - } - tlscert, err := state.tlscert() - if err != nil { - return 0, err - } - if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { - return 0, err - } - dr.updateState(state) - return dr.next(leaf.NotAfter), nil -} - -func (dr *domainRenewal) next(expiry time.Time) time.Duration { - d := expiry.Sub(timeNow()) - dr.m.renewBefore() - // add a bit of randomness to renew deadline - n := pseudoRand.int63n(int64(renewJitter)) - d -= time.Duration(n) - if d < 0 { - return 0 - } - return d -} - -var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go deleted file mode 100644 index a43ce6a..0000000 --- a/vendor/golang.org/x/crypto/acme/http.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "bytes" - "context" - "crypto" - "crypto/rand" - "encoding/json" - "fmt" - "io/ioutil" - "math/big" - "net/http" - "strconv" - "strings" - "time" -) - -// retryTimer encapsulates common logic for retrying unsuccessful requests. -// It is not safe for concurrent use. -type retryTimer struct { - // backoffFn provides backoff delay sequence for retries. - // See Client.RetryBackoff doc comment. - backoffFn func(n int, r *http.Request, res *http.Response) time.Duration - // n is the current retry attempt. - n int -} - -func (t *retryTimer) inc() { - t.n++ -} - -// backoff pauses the current goroutine as described in Client.RetryBackoff. -func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error { - d := t.backoffFn(t.n, r, res) - if d <= 0 { - return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n) - } - wakeup := time.NewTimer(d) - defer wakeup.Stop() - select { - case <-ctx.Done(): - return ctx.Err() - case <-wakeup.C: - return nil - } -} - -func (c *Client) retryTimer() *retryTimer { - f := c.RetryBackoff - if f == nil { - f = defaultBackoff - } - return &retryTimer{backoffFn: f} -} - -// defaultBackoff provides default Client.RetryBackoff implementation -// using a truncated exponential backoff algorithm, -// as described in Client.RetryBackoff. -// -// The n argument is always bounded between 1 and 30. -// The returned value is always greater than 0. 
-func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
- const max = 10 * time.Second
- var jitter time.Duration
- if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
- // Set the minimum to 1ms to avoid a case where
- // an invalid Retry-After value is parsed into 0 below,
- // resulting in the 0 returned value which would unintentionally
- // stop the retries.
- jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
- }
- if v, ok := res.Header["Retry-After"]; ok {
- return retryAfter(v[0]) + jitter
- }
-
- if n < 1 {
- n = 1
- }
- if n > 30 {
- n = 30
- }
- d := time.Duration(1<<uint(n-1))*time.Second + jitter
- if d > max {
- return max
- }
- return d
-}
-
-// retryAfter parses a Retry-After HTTP header value,
-// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
-// It returns zero value if v cannot be parsed.
-func retryAfter(v string) time.Duration {
- if i, err := strconv.Atoi(v); err == nil {
- return time.Duration(i) * time.Second
- }
- t, err := http.ParseTime(v)
- if err != nil {
- return 0
- }
- return t.Sub(timeNow())
-}
-
-// resOkay is a function that reports whether the provided response is okay.
-// It is expected to keep the response body unread.
-type resOkay func(*http.Response) bool
-
-// wantStatus returns a function which reports whether the code
-// matches the status code of a response.
-func wantStatus(codes ...int) resOkay {
- return func(res *http.Response) bool {
- for _, code := range codes {
- if code == res.StatusCode {
- return true
- }
- }
- return false
- }
-}
-
-// get issues an unsigned GET request to the specified URL.
-// It returns a non-error value only when ok reports true.
-//
-// get retries unsuccessful attempts according to c.RetryBackoff
-// until the context is done or a non-retriable error is received.
-func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
- retry := c.retryTimer()
- for {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- res, err := c.doNoRetry(ctx, req)
- switch {
- case err != nil:
- return nil, err
- case ok(res):
- return res, nil
- case isRetriable(res.StatusCode):
- retry.inc()
- resErr := responseError(res)
- res.Body.Close()
- // Ignore the error value from retry.backoff
- // and return the one from last retry, as received from the CA.
- if retry.backoff(ctx, req, res) != nil {
- return nil, resErr
- }
- default:
- defer res.Body.Close()
- return nil, responseError(res)
- }
- }
-}
-
-// post issues a signed POST request in JWS format using the provided key
-// to the specified URL.
-// It returns a non-error value only when ok reports true.
-//
-// post retries unsuccessful attempts according to c.RetryBackoff
-// until the context is done or a non-retriable error is received.
-// It uses postNoRetry to make individual requests.
-func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) {
- retry := c.retryTimer()
- for {
- res, req, err := c.postNoRetry(ctx, key, url, body)
- if err != nil {
- return nil, err
- }
- if ok(res) {
- return res, nil
- }
- resErr := responseError(res)
- res.Body.Close()
- switch {
- // Check for bad nonce before isRetriable because it may have been returned
- // with an unretriable response code such as 400 Bad Request.
- case isBadNonce(resErr):
- // Consider any previously stored nonce values to be invalid.
- c.clearNonces() - case !isRetriable(res.StatusCode): - return nil, resErr - } - retry.inc() - // Ignore the error value from retry.backoff - // and return the one from last retry, as received from the CA. - if err := retry.backoff(ctx, req, res); err != nil { - return nil, resErr - } - } -} - -// postNoRetry signs the body with the given key and POSTs it to the provided url. -// The body argument must be JSON-serializable. -// It is used by c.post to retry unsuccessful attempts. -func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { - nonce, err := c.popNonce(ctx, url) - if err != nil { - return nil, nil, err - } - b, err := jwsEncodeJSON(body, key, nonce) - if err != nil { - return nil, nil, err - } - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) - if err != nil { - return nil, nil, err - } - req.Header.Set("Content-Type", "application/jose+json") - res, err := c.doNoRetry(ctx, req) - if err != nil { - return nil, nil, err - } - c.addNonce(res.Header) - return res, req, nil -} - -// doNoRetry issues a request req, replacing its context (if any) with ctx. -func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { - res, err := c.httpClient().Do(req.WithContext(ctx)) - if err != nil { - select { - case <-ctx.Done(): - // Prefer the unadorned context error. - // (The acme package had tests assuming this, previously from ctxhttp's - // behavior, predating net/http supporting contexts natively) - // TODO(bradfitz): reconsider this in the future. But for now this - // requires no test updates. - return nil, ctx.Err() - default: - return nil, err - } - } - return res, nil -} - -func (c *Client) httpClient() *http.Client { - if c.HTTPClient != nil { - return c.HTTPClient - } - return http.DefaultClient -} - -// isBadNonce reports whether err is an ACME "badnonce" error. -func isBadNonce(err error) bool { - // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. - // However, ACME servers in the wild return their versions of the error. - // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 - // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. - ae, ok := err.(*Error) - return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") -} - -// isRetriable reports whether a request can be retried -// based on the response status code. -// -// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. -// Callers should parse the response and check with isBadNonce. -func isRetriable(code int) bool { - return code <= 399 || code >= 500 || code == http.StatusTooManyRequests -} - -// responseError creates an error of Error type from resp. 
-func responseError(resp *http.Response) error { - // don't care if ReadAll returns an error: - // json.Unmarshal will fail in that case anyway - b, _ := ioutil.ReadAll(resp.Body) - e := &wireError{Status: resp.StatusCode} - if err := json.Unmarshal(b, e); err != nil { - // this is not a regular error response: - // populate detail with anything we received, - // e.Status will already contain HTTP response code value - e.Detail = string(b) - if e.Detail == "" { - e.Detail = resp.Status - } - } - return e.error(resp.Header) -} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go deleted file mode 100644 index 6cbca25..0000000 --- a/vendor/golang.org/x/crypto/acme/jws.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - _ "crypto/sha512" // need for EC keys - "encoding/base64" - "encoding/json" - "fmt" - "math/big" -) - -// jwsEncodeJSON signs claimset using provided key and a nonce. -// The result is serialized in JSON format. -// See https://tools.ietf.org/html/rfc7515#section-7. -func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { - jwk, err := jwkEncode(key.Public()) - if err != nil { - return nil, err - } - alg, sha := jwsHasher(key) - if alg == "" || !sha.Available() { - return nil, ErrUnsupportedKey - } - phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) - phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) - cs, err := json.Marshal(claimset) - if err != nil { - return nil, err - } - payload := base64.RawURLEncoding.EncodeToString(cs) - hash := sha.New() - hash.Write([]byte(phead + "." + payload)) - sig, err := jwsSign(key, sha, hash.Sum(nil)) - if err != nil { - return nil, err - } - - enc := struct { - Protected string `json:"protected"` - Payload string `json:"payload"` - Sig string `json:"signature"` - }{ - Protected: phead, - Payload: payload, - Sig: base64.RawURLEncoding.EncodeToString(sig), - } - return json.Marshal(&enc) -} - -// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. -// The result is also suitable for creating a JWK thumbprint. -// https://tools.ietf.org/html/rfc7517 -func jwkEncode(pub crypto.PublicKey) (string, error) { - switch pub := pub.(type) { - case *rsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.3.1 - n := pub.N - e := big.NewInt(int64(pub.E)) - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. - return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, - base64.RawURLEncoding.EncodeToString(e.Bytes()), - base64.RawURLEncoding.EncodeToString(n.Bytes()), - ), nil - case *ecdsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.2.1 - p := pub.Curve.Params() - n := p.BitSize / 8 - if p.BitSize%8 != 0 { - n++ - } - x := pub.X.Bytes() - if n > len(x) { - x = append(make([]byte, n-len(x)), x...) - } - y := pub.Y.Bytes() - if n > len(y) { - y = append(make([]byte, n-len(y)), y...) - } - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
- return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, - p.Name, - base64.RawURLEncoding.EncodeToString(x), - base64.RawURLEncoding.EncodeToString(y), - ), nil - } - return "", ErrUnsupportedKey -} - -// jwsSign signs the digest using the given key. -// It returns ErrUnsupportedKey if the key type is unknown. -// The hash is used only for RSA keys. -func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { - switch key := key.(type) { - case *rsa.PrivateKey: - return key.Sign(rand.Reader, digest, hash) - case *ecdsa.PrivateKey: - r, s, err := ecdsa.Sign(rand.Reader, key, digest) - if err != nil { - return nil, err - } - rb, sb := r.Bytes(), s.Bytes() - size := key.Params().BitSize / 8 - if size%8 > 0 { - size++ - } - sig := make([]byte, size*2) - copy(sig[size-len(rb):], rb) - copy(sig[size*2-len(sb):], sb) - return sig, nil - } - return nil, ErrUnsupportedKey -} - -// jwsHasher indicates suitable JWS algorithm name and a hash function -// to use for signing a digest with the provided key. -// It returns ("", 0) if the key is not supported. -func jwsHasher(key crypto.Signer) (string, crypto.Hash) { - switch key := key.(type) { - case *rsa.PrivateKey: - return "RS256", crypto.SHA256 - case *ecdsa.PrivateKey: - switch key.Params().Name { - case "P-256": - return "ES256", crypto.SHA256 - case "P-384": - return "ES384", crypto.SHA384 - case "P-521": - return "ES512", crypto.SHA512 - } - } - return "", 0 -} - -// JWKThumbprint creates a JWK thumbprint out of pub -// as specified in https://tools.ietf.org/html/rfc7638. -func JWKThumbprint(pub crypto.PublicKey) (string, error) { - jwk, err := jwkEncode(pub) - if err != nil { - return "", err - } - b := sha256.Sum256([]byte(jwk)) - return base64.RawURLEncoding.EncodeToString(b[:]), nil -} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go deleted file mode 100644 index 54792c0..0000000 --- a/vendor/golang.org/x/crypto/acme/types.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "crypto" - "crypto/x509" - "errors" - "fmt" - "net/http" - "strings" - "time" -) - -// ACME server response statuses used to describe Authorization and Challenge states. -const ( - StatusUnknown = "unknown" - StatusPending = "pending" - StatusProcessing = "processing" - StatusValid = "valid" - StatusInvalid = "invalid" - StatusRevoked = "revoked" -) - -// CRLReasonCode identifies the reason for a certificate revocation. -type CRLReasonCode int - -// CRL reason codes as defined in RFC 5280. -const ( - CRLReasonUnspecified CRLReasonCode = 0 - CRLReasonKeyCompromise CRLReasonCode = 1 - CRLReasonCACompromise CRLReasonCode = 2 - CRLReasonAffiliationChanged CRLReasonCode = 3 - CRLReasonSuperseded CRLReasonCode = 4 - CRLReasonCessationOfOperation CRLReasonCode = 5 - CRLReasonCertificateHold CRLReasonCode = 6 - CRLReasonRemoveFromCRL CRLReasonCode = 8 - CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 - CRLReasonAACompromise CRLReasonCode = 10 -) - -// ErrUnsupportedKey is returned when an unsupported key type is encountered. -var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") - -// Error is an ACME error, defined in Problem Details for HTTP APIs doc -// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. 
-type Error struct { - // StatusCode is The HTTP status code generated by the origin server. - StatusCode int - // ProblemType is a URI reference that identifies the problem type, - // typically in a "urn:acme:error:xxx" form. - ProblemType string - // Detail is a human-readable explanation specific to this occurrence of the problem. - Detail string - // Header is the original server error response headers. - // It may be nil. - Header http.Header -} - -func (e *Error) Error() string { - return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) -} - -// AuthorizationError indicates that an authorization for an identifier -// did not succeed. -// It contains all errors from Challenge items of the failed Authorization. -type AuthorizationError struct { - // URI uniquely identifies the failed Authorization. - URI string - - // Identifier is an AuthzID.Value of the failed Authorization. - Identifier string - - // Errors is a collection of non-nil error values of Challenge items - // of the failed Authorization. - Errors []error -} - -func (a *AuthorizationError) Error() string { - e := make([]string, len(a.Errors)) - for i, err := range a.Errors { - e[i] = err.Error() - } - return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) -} - -// RateLimit reports whether err represents a rate limit error and -// any Retry-After duration returned by the server. -// -// See the following for more details on rate limiting: -// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 -func RateLimit(err error) (time.Duration, bool) { - e, ok := err.(*Error) - if !ok { - return 0, false - } - // Some CA implementations may return incorrect values. - // Use case-insensitive comparison. - if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { - return 0, false - } - if e.Header == nil { - return 0, true - } - return retryAfter(e.Header.Get("Retry-After")), true -} - -// Account is a user account. It is associated with a private key. -type Account struct { - // URI is the account unique ID, which is also a URL used to retrieve - // account data from the CA. - URI string - - // Contact is a slice of contact info used during registration. - Contact []string - - // The terms user has agreed to. - // A value not matching CurrentTerms indicates that the user hasn't agreed - // to the actual Terms of Service of the CA. - AgreedTerms string - - // Actual terms of a CA. - CurrentTerms string - - // Authz is the authorization URL used to initiate a new authz flow. - Authz string - - // Authorizations is a URI from which a list of authorizations - // granted to this account can be fetched via a GET request. - Authorizations string - - // Certificates is a URI from which a list of certificates - // issued for this account can be fetched via a GET request. - Certificates string -} - -// Directory is ACME server discovery data. -type Directory struct { - // RegURL is an account endpoint URL, allowing for creating new - // and modifying existing accounts. - RegURL string - - // AuthzURL is used to initiate Identifier Authorization flow. - AuthzURL string - - // CertURL is a new certificate issuance endpoint URL. - CertURL string - - // RevokeURL is used to initiate a certificate revocation flow. - RevokeURL string - - // Term is a URI identifying the current terms of service. - Terms string - - // Website is an HTTP or HTTPS URL locating a website - // providing more information about the ACME server. 
- Website string - - // CAA consists of lowercase hostname elements, which the ACME server - // recognises as referring to itself for the purposes of CAA record validation - // as defined in RFC6844. - CAA []string -} - -// Challenge encodes a returned CA challenge. -// Its Error field may be non-nil if the challenge is part of an Authorization -// with StatusInvalid. -type Challenge struct { - // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". - Type string - - // URI is where a challenge response can be posted to. - URI string - - // Token is a random value that uniquely identifies the challenge. - Token string - - // Status identifies the status of this challenge. - Status string - - // Error indicates the reason for an authorization failure - // when this challenge was used. - // The type of a non-nil value is *Error. - Error error -} - -// Authorization encodes an authorization response. -type Authorization struct { - // URI uniquely identifies a authorization. - URI string - - // Status identifies the status of an authorization. - Status string - - // Identifier is what the account is authorized to represent. - Identifier AuthzID - - // Challenges that the client needs to fulfill in order to prove possession - // of the identifier (for pending authorizations). - // For final authorizations, the challenges that were used. - Challenges []*Challenge - - // A collection of sets of challenges, each of which would be sufficient - // to prove possession of the identifier. - // Clients must complete a set of challenges that covers at least one set. - // Challenges are identified by their indices in the challenges array. - // If this field is empty, the client needs to complete all challenges. - Combinations [][]int -} - -// AuthzID is an identifier that an account is authorized to represent. -type AuthzID struct { - Type string // The type of identifier, e.g. "dns". - Value string // The identifier itself, e.g. "example.org". -} - -// wireAuthz is ACME JSON representation of Authorization objects. -type wireAuthz struct { - Status string - Challenges []wireChallenge - Combinations [][]int - Identifier struct { - Type string - Value string - } -} - -func (z *wireAuthz) authorization(uri string) *Authorization { - a := &Authorization{ - URI: uri, - Status: z.Status, - Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, - Combinations: z.Combinations, // shallow copy - Challenges: make([]*Challenge, len(z.Challenges)), - } - for i, v := range z.Challenges { - a.Challenges[i] = v.challenge() - } - return a -} - -func (z *wireAuthz) error(uri string) *AuthorizationError { - err := &AuthorizationError{ - URI: uri, - Identifier: z.Identifier.Value, - } - for _, raw := range z.Challenges { - if raw.Error != nil { - err.Errors = append(err.Errors, raw.Error.error(nil)) - } - } - return err -} - -// wireChallenge is ACME JSON challenge representation. -type wireChallenge struct { - URI string `json:"uri"` - Type string - Token string - Status string - Error *wireError -} - -func (c *wireChallenge) challenge() *Challenge { - v := &Challenge{ - URI: c.URI, - Type: c.Type, - Token: c.Token, - Status: c.Status, - } - if v.Status == "" { - v.Status = StatusPending - } - if c.Error != nil { - v.Error = c.Error.error(nil) - } - return v -} - -// wireError is a subset of fields of the Problem Details object -// as described in https://tools.ietf.org/html/rfc7807#section-3.1. 
-type wireError struct { - Status int - Type string - Detail string -} - -func (e *wireError) error(h http.Header) *Error { - return &Error{ - StatusCode: e.Status, - ProblemType: e.Type, - Detail: e.Detail, - Header: h, - } -} - -// CertOption is an optional argument type for the TLS ChallengeCert methods for -// customizing a temporary certificate for TLS-based challenges. -type CertOption interface { - privateCertOpt() -} - -// WithKey creates an option holding a private/public key pair. -// The private part signs a certificate, and the public part represents the signee. -func WithKey(key crypto.Signer) CertOption { - return &certOptKey{key} -} - -type certOptKey struct { - key crypto.Signer -} - -func (*certOptKey) privateCertOpt() {} - -// WithTemplate creates an option for specifying a certificate template. -// See x509.CreateCertificate for template usage details. -// -// In TLS ChallengeCert methods, the template is also used as parent, -// resulting in a self-signed certificate. -// The DNSNames field of t is always overwritten for tls-sni challenge certs. -func WithTemplate(t *x509.Certificate) CertOption { - return (*certOptTemplate)(t) -} - -type certOptTemplate x509.Certificate - -func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac..0000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. 
-func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 6bfa866..0000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go. -// Invoke as -// -// go run gen.go |gofmt >table.go -// go run gen.go -test |gofmt >table_test.go - -import ( - "flag" - "fmt" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - if *test { - fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n") - fmt.Printf("package atom\n\n") - fmt.Printf("var testAtomList = []string{\n") - for _, s := range all { - fmt.Printf("\t%q,\n", s) - } - fmt.Printf("}\n") - return - } - - // uniq - lists have dups - // compute max len too - maxLen := 0 - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - if maxLen < len(s) { - maxLen = len(s) - } - all[w] = s - w++ - } - } - all = all[:w] - - // Find hash that minimizes table size. - var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. 
- besti := -1
- bestj := -1
- bestk := 0
- for i, s := range layout {
- if s == "" {
- continue
- }
- for j, t := range layout {
- if i == j {
- continue
- }
- for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
- if s[len(s)-k:] == t[:k] {
- besti = i
- bestj = j
- bestk = k
- }
- }
- }
- }
- if bestk > 0 {
- layout[besti] += layout[bestj][bestk:]
- layout[bestj] = ""
- continue
- }
- break
- }
-
- text := strings.Join(layout, "")
-
- atom := map[string]uint32{}
- for _, s := range all {
- off := strings.Index(text, s)
- if off < 0 {
- panic("lost string " + s)
- }
- atom[s] = uint32(off<<8 | len(s))
- }
-
- // Generate the Go code.
- fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
- fmt.Printf("package atom\n\nconst (\n")
- for _, s := range all {
- fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
- }
- fmt.Printf(")\n\n")
-
- fmt.Printf("const hash0 = %#x\n\n", best.h0)
- fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
-
- fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
- for i, s := range best.tab {
- if s == "" {
- continue
- }
- fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
- }
- fmt.Printf("}\n")
- datasize := (1 << best.k) * 4
-
- fmt.Printf("const atomText =\n")
- textsize := len(text)
- for len(text) > 60 {
- fmt.Printf("\t%q +\n", text[:60])
- text = text[60:]
- }
- fmt.Printf("\t%q\n\n", text)
-
- fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
-}
-
-type byLen []string
-
-func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
-func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x byLen) Len() int { return len(x) }
-
-// fnv computes the FNV hash with an arbitrary starting value h.
-func fnv(h uint32, s string) uint32 {
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-// A table represents an attempt at constructing the lookup table.
-// The lookup table uses cuckoo hashing, meaning that each string
-// can be found in one of two positions.
-type table struct {
- h0 uint32
- k uint
- mask uint32
- tab []string
-}
-
-// hash returns the two hashes for s.
-func (t *table) hash(s string) (h1, h2 uint32) {
- h := fnv(t.h0, s)
- h1 = h & t.mask
- h2 = (h >> 16) & t.mask
- return
-}
-
-// init initializes the table with the given parameters.
-// h0 is the initial hash value,
-// k is the number of bits of hash value to use, and
-// x is the list of strings to store in the table.
-// init returns false if the table cannot be constructed.
-func (t *table) init(h0 uint32, k uint, x []string) bool {
- t.h0 = h0
- t.k = k
- t.tab = make([]string, 1<<k)
- t.mask = 1<<k - 1
- for _, s := range x {
- if !t.insert(s) {
- return false
- }
- }
- return true
-}
-
-// insert inserts s in the table.
-func (t *table) insert(s string) bool {
- h1, h2 := t.hash(s)
- if t.tab[h1] == "" {
- t.tab[h1] = s
- return true
- }
- if t.tab[h2] == "" {
- t.tab[h2] = s
- return true
- }
- if t.push(h1, 0) {
- t.tab[h1] = s
- return true
- }
- if t.push(h2, 0) {
- t.tab[h2] = s
- return true
- }
- return false
-}
-
-// push attempts to free slot i by moving its current entry to the
-// entry's other possible slot, pushing along any entries already there.
-func (t *table) push(i uint32, depth int) bool {
- if depth > len(t.tab) {
- return false
- }
- s := t.tab[i]
- h1, h2 := t.hash(s)
- j := h1 + h2 - i
- if t.tab[j] != "" && !t.push(j, depth+1) {
- return false
- }
- t.tab[j] = s
- return true
-}
-
-// The lists of element names and attribute keys were taken from
-// https://html.spec.whatwg.org/multipage/indices.html#index
-// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
- -var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - "track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 - -var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "alt", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "poster", - "preload", - "radiogroup", - "readonly", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "span", - "src", - "srcdoc", - "srclang", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "usemap", - "value", - "width", - "wrap", -} - -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncuechange", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadstart", - "onmessage", - "onmousedown", - "onmousemove", - "onmouseout", - 
"onmouseover", - "onmouseup", - "onmousewheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onscroll", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. -var extra = []string{ - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. - "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2605ba3..0000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,713 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x2106 - AcceptCharset Atom = 0x210e - Accesskey Atom = 0x3309 - Action Atom = 0x1f606 - Address Atom = 0x4f307 - Align Atom = 0x1105 - Alt Atom = 0x4503 - Annotation Atom = 0x1670a - AnnotationXml Atom = 0x1670e - Applet Atom = 0x2b306 - Area Atom = 0x2fa04 - Article Atom = 0x38807 - Aside Atom = 0x8305 - Async Atom = 0x7b05 - Audio Atom = 0xa605 - Autocomplete Atom = 0x1fc0c - Autofocus Atom = 0xb309 - Autoplay Atom = 0xce08 - B Atom = 0x101 - Base Atom = 0xd604 - Basefont Atom = 0xd608 - Bdi Atom = 0x1a03 - Bdo Atom = 0xe703 - Bgsound Atom = 0x11807 - Big Atom = 0x12403 - Blink Atom = 0x12705 - Blockquote Atom = 0x12c0a - Body Atom = 0x2f04 - Br Atom = 0x202 - Button Atom = 0x13606 - Canvas Atom = 0x7f06 - Caption Atom = 0x1bb07 - Center Atom = 0x5b506 - Challenge Atom = 0x21f09 - Charset Atom = 0x2807 - Checked Atom = 0x32807 - Cite Atom = 0x3c804 - Class Atom = 0x4de05 - Code Atom = 0x14904 - Col Atom = 0x15003 - Colgroup Atom = 0x15008 - Color Atom = 0x15d05 - Cols Atom = 0x16204 - Colspan Atom = 0x16207 - Command Atom = 0x17507 - Content Atom = 0x42307 - Contenteditable Atom = 0x4230f - Contextmenu Atom = 0x3310b - Controls Atom = 0x18808 - Coords Atom = 0x19406 - Crossorigin Atom = 0x19f0b - Data Atom = 0x44a04 - Datalist Atom = 0x44a08 - Datetime Atom = 0x23c08 - Dd Atom = 0x26702 - Default Atom = 0x8607 - Defer Atom = 0x14b05 - Del Atom = 0x3ef03 - Desc Atom = 0x4db04 - Details Atom = 0x4807 - Dfn Atom = 0x6103 - Dialog Atom = 0x1b06 - Dir Atom = 0x6903 - Dirname Atom = 0x6907 - Disabled Atom = 0x10c08 - Div Atom = 0x11303 - Dl Atom = 0x11e02 - Download Atom = 0x40008 - Draggable Atom = 0x17b09 - Dropzone Atom = 0x39108 - Dt Atom = 0x50902 - Em Atom = 0x6502 - Embed Atom = 0x6505 - Enctype Atom = 0x21107 - Face Atom = 0x5b304 - Fieldset Atom = 0x1b008 - Figcaption Atom = 0x1b80a - Figure Atom = 0x1cc06 - Font Atom = 0xda04 - Footer Atom = 0x8d06 - For Atom = 0x1d803 - ForeignObject Atom = 0x1d80d - Foreignobject Atom = 0x1e50d - Form Atom = 0x1f204 - Formaction Atom = 0x1f20a - Formenctype Atom = 0x20d0b - Formmethod Atom = 0x2280a - Formnovalidate 
Atom = 0x2320e - Formtarget Atom = 0x2470a - Frame Atom = 0x9a05 - Frameset Atom = 0x9a08 - H1 Atom = 0x26e02 - H2 Atom = 0x29402 - H3 Atom = 0x2a702 - H4 Atom = 0x2e902 - H5 Atom = 0x2f302 - H6 Atom = 0x50b02 - Head Atom = 0x2d504 - Header Atom = 0x2d506 - Headers Atom = 0x2d507 - Height Atom = 0x25106 - Hgroup Atom = 0x25906 - Hidden Atom = 0x26506 - High Atom = 0x26b04 - Hr Atom = 0x27002 - Href Atom = 0x27004 - Hreflang Atom = 0x27008 - Html Atom = 0x25504 - HttpEquiv Atom = 0x2780a - I Atom = 0x601 - Icon Atom = 0x42204 - Id Atom = 0x8502 - Iframe Atom = 0x29606 - Image Atom = 0x29c05 - Img Atom = 0x2a103 - Input Atom = 0x3e805 - Inputmode Atom = 0x3e809 - Ins Atom = 0x1a803 - Isindex Atom = 0x2a907 - Ismap Atom = 0x2b005 - Itemid Atom = 0x33c06 - Itemprop Atom = 0x3c908 - Itemref Atom = 0x5ad07 - Itemscope Atom = 0x2b909 - Itemtype Atom = 0x2c308 - Kbd Atom = 0x1903 - Keygen Atom = 0x3906 - Keytype Atom = 0x53707 - Kind Atom = 0x10904 - Label Atom = 0xf005 - Lang Atom = 0x27404 - Legend Atom = 0x18206 - Li Atom = 0x1202 - Link Atom = 0x12804 - List Atom = 0x44e04 - Listing Atom = 0x44e07 - Loop Atom = 0xf404 - Low Atom = 0x11f03 - Malignmark Atom = 0x100a - Manifest Atom = 0x5f108 - Map Atom = 0x2b203 - Mark Atom = 0x1604 - Marquee Atom = 0x2cb07 - Math Atom = 0x2d204 - Max Atom = 0x2e103 - Maxlength Atom = 0x2e109 - Media Atom = 0x6e05 - Mediagroup Atom = 0x6e0a - Menu Atom = 0x33804 - Menuitem Atom = 0x33808 - Meta Atom = 0x45d04 - Meter Atom = 0x24205 - Method Atom = 0x22c06 - Mglyph Atom = 0x2a206 - Mi Atom = 0x2eb02 - Min Atom = 0x2eb03 - Minlength Atom = 0x2eb09 - Mn Atom = 0x23502 - Mo Atom = 0x3ed02 - Ms Atom = 0x2bc02 - Mtext Atom = 0x2f505 - Multiple Atom = 0x30308 - Muted Atom = 0x30b05 - Name Atom = 0x6c04 - Nav Atom = 0x3e03 - Nobr Atom = 0x5704 - Noembed Atom = 0x6307 - Noframes Atom = 0x9808 - Noscript Atom = 0x3d208 - Novalidate Atom = 0x2360a - Object Atom = 0x1ec06 - Ol Atom = 0xc902 - Onabort Atom = 0x13a07 - Onafterprint Atom = 0x1c00c - Onautocomplete Atom = 0x1fa0e - Onautocompleteerror Atom = 0x1fa13 - Onbeforeprint Atom = 0x6040d - Onbeforeunload Atom = 0x4e70e - Onblur Atom = 0xaa06 - Oncancel Atom = 0xe908 - Oncanplay Atom = 0x28509 - Oncanplaythrough Atom = 0x28510 - Onchange Atom = 0x3a708 - Onclick Atom = 0x31007 - Onclose Atom = 0x31707 - Oncontextmenu Atom = 0x32f0d - Oncuechange Atom = 0x3420b - Ondblclick Atom = 0x34d0a - Ondrag Atom = 0x35706 - Ondragend Atom = 0x35709 - Ondragenter Atom = 0x3600b - Ondragleave Atom = 0x36b0b - Ondragover Atom = 0x3760a - Ondragstart Atom = 0x3800b - Ondrop Atom = 0x38f06 - Ondurationchange Atom = 0x39f10 - Onemptied Atom = 0x39609 - Onended Atom = 0x3af07 - Onerror Atom = 0x3b607 - Onfocus Atom = 0x3bd07 - Onhashchange Atom = 0x3da0c - Oninput Atom = 0x3e607 - Oninvalid Atom = 0x3f209 - Onkeydown Atom = 0x3fb09 - Onkeypress Atom = 0x4080a - Onkeyup Atom = 0x41807 - Onlanguagechange Atom = 0x43210 - Onload Atom = 0x44206 - Onloadeddata Atom = 0x4420c - Onloadedmetadata Atom = 0x45510 - Onloadstart Atom = 0x46b0b - Onmessage Atom = 0x47609 - Onmousedown Atom = 0x47f0b - Onmousemove Atom = 0x48a0b - Onmouseout Atom = 0x4950a - Onmouseover Atom = 0x4a20b - Onmouseup Atom = 0x4ad09 - Onmousewheel Atom = 0x4b60c - Onoffline Atom = 0x4c209 - Ononline Atom = 0x4cb08 - Onpagehide Atom = 0x4d30a - Onpageshow Atom = 0x4fe0a - Onpause Atom = 0x50d07 - Onplay Atom = 0x51706 - Onplaying Atom = 0x51709 - Onpopstate Atom = 0x5200a - Onprogress Atom = 0x52a0a - Onratechange Atom = 0x53e0c - Onreset Atom = 0x54a07 - Onresize Atom = 
0x55108 - Onscroll Atom = 0x55f08 - Onseeked Atom = 0x56708 - Onseeking Atom = 0x56f09 - Onselect Atom = 0x57808 - Onshow Atom = 0x58206 - Onsort Atom = 0x58b06 - Onstalled Atom = 0x59509 - Onstorage Atom = 0x59e09 - Onsubmit Atom = 0x5a708 - Onsuspend Atom = 0x5bb09 - Ontimeupdate Atom = 0xdb0c - Ontoggle Atom = 0x5c408 - Onunload Atom = 0x5cc08 - Onvolumechange Atom = 0x5d40e - Onwaiting Atom = 0x5e209 - Open Atom = 0x3cf04 - Optgroup Atom = 0xf608 - Optimum Atom = 0x5eb07 - Option Atom = 0x60006 - Output Atom = 0x49c06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x5107 - Ping Atom = 0x7704 - Placeholder Atom = 0xc30b - Plaintext Atom = 0xfd09 - Poster Atom = 0x15706 - Pre Atom = 0x25e03 - Preload Atom = 0x25e07 - Progress Atom = 0x52c08 - Prompt Atom = 0x5fa06 - Public Atom = 0x41e06 - Q Atom = 0x13101 - Radiogroup Atom = 0x30a - Readonly Atom = 0x2fb08 - Rel Atom = 0x25f03 - Required Atom = 0x1d008 - Reversed Atom = 0x5a08 - Rows Atom = 0x9204 - Rowspan Atom = 0x9207 - Rp Atom = 0x1c602 - Rt Atom = 0x13f02 - Ruby Atom = 0xaf04 - S Atom = 0x2c01 - Samp Atom = 0x4e04 - Sandbox Atom = 0xbb07 - Scope Atom = 0x2bd05 - Scoped Atom = 0x2bd06 - Script Atom = 0x3d406 - Seamless Atom = 0x31c08 - Section Atom = 0x4e207 - Select Atom = 0x57a06 - Selected Atom = 0x57a08 - Shape Atom = 0x4f905 - Size Atom = 0x55504 - Sizes Atom = 0x55505 - Small Atom = 0x18f05 - Sortable Atom = 0x58d08 - Sorted Atom = 0x19906 - Source Atom = 0x1aa06 - Spacer Atom = 0x2db06 - Span Atom = 0x9504 - Spellcheck Atom = 0x3230a - Src Atom = 0x3c303 - Srcdoc Atom = 0x3c306 - Srclang Atom = 0x41107 - Start Atom = 0x38605 - Step Atom = 0x5f704 - Strike Atom = 0x53306 - Strong Atom = 0x55906 - Style Atom = 0x61105 - Sub Atom = 0x5a903 - Summary Atom = 0x61607 - Sup Atom = 0x61d03 - Svg Atom = 0x62003 - System Atom = 0x62306 - Tabindex Atom = 0x46308 - Table Atom = 0x42d05 - Target Atom = 0x24b06 - Tbody Atom = 0x2e05 - Td Atom = 0x4702 - Template Atom = 0x62608 - Textarea Atom = 0x2f608 - Tfoot Atom = 0x8c05 - Th Atom = 0x22e02 - Thead Atom = 0x2d405 - Time Atom = 0xdd04 - Title Atom = 0xa105 - Tr Atom = 0x10502 - Track Atom = 0x10505 - Translate Atom = 0x14009 - Tt Atom = 0x5302 - Type Atom = 0x21404 - Typemustmatch Atom = 0x2140d - U Atom = 0xb01 - Ul Atom = 0x8a02 - Usemap Atom = 0x51106 - Value Atom = 0x4005 - Var Atom = 0x11503 - Video Atom = 0x28105 - Wbr Atom = 0x12103 - Width Atom = 0x50705 - Wrap Atom = 0x58704 - Xmp Atom = 0xc103 -) - -const hash0 = 0xc17da63e - -const maxAtomLen = 19 - -var table = [1 << 9]Atom{ - 0x1: 0x48a0b, // onmousemove - 0x2: 0x5e209, // onwaiting - 0x3: 0x1fa13, // onautocompleteerror - 0x4: 0x5fa06, // prompt - 0x7: 0x5eb07, // optimum - 0x8: 0x1604, // mark - 0xa: 0x5ad07, // itemref - 0xb: 0x4fe0a, // onpageshow - 0xc: 0x57a06, // select - 0xd: 0x17b09, // draggable - 0xe: 0x3e03, // nav - 0xf: 0x17507, // command - 0x11: 0xb01, // u - 0x14: 0x2d507, // headers - 0x15: 0x44a08, // datalist - 0x17: 0x4e04, // samp - 0x1a: 0x3fb09, // onkeydown - 0x1b: 0x55f08, // onscroll - 0x1c: 0x15003, // col - 0x20: 0x3c908, // itemprop - 0x21: 0x2780a, // http-equiv - 0x22: 0x61d03, // sup - 0x24: 0x1d008, // required - 0x2b: 0x25e07, // preload - 0x2c: 0x6040d, // onbeforeprint - 0x2d: 0x3600b, // ondragenter - 0x2e: 0x50902, // dt - 0x2f: 0x5a708, // onsubmit - 0x30: 0x27002, // hr - 0x31: 0x32f0d, // oncontextmenu - 0x33: 0x29c05, // image - 0x34: 0x50d07, // onpause - 0x35: 0x25906, // hgroup - 0x36: 0x7704, // ping - 0x37: 0x57808, // onselect - 0x3a: 0x11303, // div - 0x3b: 0x1fa0e, 
// onautocomplete - 0x40: 0x2eb02, // mi - 0x41: 0x31c08, // seamless - 0x42: 0x2807, // charset - 0x43: 0x8502, // id - 0x44: 0x5200a, // onpopstate - 0x45: 0x3ef03, // del - 0x46: 0x2cb07, // marquee - 0x47: 0x3309, // accesskey - 0x49: 0x8d06, // footer - 0x4a: 0x44e04, // list - 0x4b: 0x2b005, // ismap - 0x51: 0x33804, // menu - 0x52: 0x2f04, // body - 0x55: 0x9a08, // frameset - 0x56: 0x54a07, // onreset - 0x57: 0x12705, // blink - 0x58: 0xa105, // title - 0x59: 0x38807, // article - 0x5b: 0x22e02, // th - 0x5d: 0x13101, // q - 0x5e: 0x3cf04, // open - 0x5f: 0x2fa04, // area - 0x61: 0x44206, // onload - 0x62: 0xda04, // font - 0x63: 0xd604, // base - 0x64: 0x16207, // colspan - 0x65: 0x53707, // keytype - 0x66: 0x11e02, // dl - 0x68: 0x1b008, // fieldset - 0x6a: 0x2eb03, // min - 0x6b: 0x11503, // var - 0x6f: 0x2d506, // header - 0x70: 0x13f02, // rt - 0x71: 0x15008, // colgroup - 0x72: 0x23502, // mn - 0x74: 0x13a07, // onabort - 0x75: 0x3906, // keygen - 0x76: 0x4c209, // onoffline - 0x77: 0x21f09, // challenge - 0x78: 0x2b203, // map - 0x7a: 0x2e902, // h4 - 0x7b: 0x3b607, // onerror - 0x7c: 0x2e109, // maxlength - 0x7d: 0x2f505, // mtext - 0x7e: 0xbb07, // sandbox - 0x7f: 0x58b06, // onsort - 0x80: 0x100a, // malignmark - 0x81: 0x45d04, // meta - 0x82: 0x7b05, // async - 0x83: 0x2a702, // h3 - 0x84: 0x26702, // dd - 0x85: 0x27004, // href - 0x86: 0x6e0a, // mediagroup - 0x87: 0x19406, // coords - 0x88: 0x41107, // srclang - 0x89: 0x34d0a, // ondblclick - 0x8a: 0x4005, // value - 0x8c: 0xe908, // oncancel - 0x8e: 0x3230a, // spellcheck - 0x8f: 0x9a05, // frame - 0x91: 0x12403, // big - 0x94: 0x1f606, // action - 0x95: 0x6903, // dir - 0x97: 0x2fb08, // readonly - 0x99: 0x42d05, // table - 0x9a: 0x61607, // summary - 0x9b: 0x12103, // wbr - 0x9c: 0x30a, // radiogroup - 0x9d: 0x6c04, // name - 0x9f: 0x62306, // system - 0xa1: 0x15d05, // color - 0xa2: 0x7f06, // canvas - 0xa3: 0x25504, // html - 0xa5: 0x56f09, // onseeking - 0xac: 0x4f905, // shape - 0xad: 0x25f03, // rel - 0xae: 0x28510, // oncanplaythrough - 0xaf: 0x3760a, // ondragover - 0xb0: 0x62608, // template - 0xb1: 0x1d80d, // foreignObject - 0xb3: 0x9204, // rows - 0xb6: 0x44e07, // listing - 0xb7: 0x49c06, // output - 0xb9: 0x3310b, // contextmenu - 0xbb: 0x11f03, // low - 0xbc: 0x1c602, // rp - 0xbd: 0x5bb09, // onsuspend - 0xbe: 0x13606, // button - 0xbf: 0x4db04, // desc - 0xc1: 0x4e207, // section - 0xc2: 0x52a0a, // onprogress - 0xc3: 0x59e09, // onstorage - 0xc4: 0x2d204, // math - 0xc5: 0x4503, // alt - 0xc7: 0x8a02, // ul - 0xc8: 0x5107, // pattern - 0xc9: 0x4b60c, // onmousewheel - 0xca: 0x35709, // ondragend - 0xcb: 0xaf04, // ruby - 0xcc: 0xc01, // p - 0xcd: 0x31707, // onclose - 0xce: 0x24205, // meter - 0xcf: 0x11807, // bgsound - 0xd2: 0x25106, // height - 0xd4: 0x101, // b - 0xd5: 0x2c308, // itemtype - 0xd8: 0x1bb07, // caption - 0xd9: 0x10c08, // disabled - 0xdb: 0x33808, // menuitem - 0xdc: 0x62003, // svg - 0xdd: 0x18f05, // small - 0xde: 0x44a04, // data - 0xe0: 0x4cb08, // ononline - 0xe1: 0x2a206, // mglyph - 0xe3: 0x6505, // embed - 0xe4: 0x10502, // tr - 0xe5: 0x46b0b, // onloadstart - 0xe7: 0x3c306, // srcdoc - 0xeb: 0x5c408, // ontoggle - 0xed: 0xe703, // bdo - 0xee: 0x4702, // td - 0xef: 0x8305, // aside - 0xf0: 0x29402, // h2 - 0xf1: 0x52c08, // progress - 0xf2: 0x12c0a, // blockquote - 0xf4: 0xf005, // label - 0xf5: 0x601, // i - 0xf7: 0x9207, // rowspan - 0xfb: 0x51709, // onplaying - 0xfd: 0x2a103, // img - 0xfe: 0xf608, // optgroup - 0xff: 0x42307, // content - 0x101: 0x53e0c, // 
onratechange - 0x103: 0x3da0c, // onhashchange - 0x104: 0x4807, // details - 0x106: 0x40008, // download - 0x109: 0x14009, // translate - 0x10b: 0x4230f, // contenteditable - 0x10d: 0x36b0b, // ondragleave - 0x10e: 0x2106, // accept - 0x10f: 0x57a08, // selected - 0x112: 0x1f20a, // formaction - 0x113: 0x5b506, // center - 0x115: 0x45510, // onloadedmetadata - 0x116: 0x12804, // link - 0x117: 0xdd04, // time - 0x118: 0x19f0b, // crossorigin - 0x119: 0x3bd07, // onfocus - 0x11a: 0x58704, // wrap - 0x11b: 0x42204, // icon - 0x11d: 0x28105, // video - 0x11e: 0x4de05, // class - 0x121: 0x5d40e, // onvolumechange - 0x122: 0xaa06, // onblur - 0x123: 0x2b909, // itemscope - 0x124: 0x61105, // style - 0x127: 0x41e06, // public - 0x129: 0x2320e, // formnovalidate - 0x12a: 0x58206, // onshow - 0x12c: 0x51706, // onplay - 0x12d: 0x3c804, // cite - 0x12e: 0x2bc02, // ms - 0x12f: 0xdb0c, // ontimeupdate - 0x130: 0x10904, // kind - 0x131: 0x2470a, // formtarget - 0x135: 0x3af07, // onended - 0x136: 0x26506, // hidden - 0x137: 0x2c01, // s - 0x139: 0x2280a, // formmethod - 0x13a: 0x3e805, // input - 0x13c: 0x50b02, // h6 - 0x13d: 0xc902, // ol - 0x13e: 0x3420b, // oncuechange - 0x13f: 0x1e50d, // foreignobject - 0x143: 0x4e70e, // onbeforeunload - 0x144: 0x2bd05, // scope - 0x145: 0x39609, // onemptied - 0x146: 0x14b05, // defer - 0x147: 0xc103, // xmp - 0x148: 0x39f10, // ondurationchange - 0x149: 0x1903, // kbd - 0x14c: 0x47609, // onmessage - 0x14d: 0x60006, // option - 0x14e: 0x2eb09, // minlength - 0x14f: 0x32807, // checked - 0x150: 0xce08, // autoplay - 0x152: 0x202, // br - 0x153: 0x2360a, // novalidate - 0x156: 0x6307, // noembed - 0x159: 0x31007, // onclick - 0x15a: 0x47f0b, // onmousedown - 0x15b: 0x3a708, // onchange - 0x15e: 0x3f209, // oninvalid - 0x15f: 0x2bd06, // scoped - 0x160: 0x18808, // controls - 0x161: 0x30b05, // muted - 0x162: 0x58d08, // sortable - 0x163: 0x51106, // usemap - 0x164: 0x1b80a, // figcaption - 0x165: 0x35706, // ondrag - 0x166: 0x26b04, // high - 0x168: 0x3c303, // src - 0x169: 0x15706, // poster - 0x16b: 0x1670e, // annotation-xml - 0x16c: 0x5f704, // step - 0x16d: 0x4, // abbr - 0x16e: 0x1b06, // dialog - 0x170: 0x1202, // li - 0x172: 0x3ed02, // mo - 0x175: 0x1d803, // for - 0x176: 0x1a803, // ins - 0x178: 0x55504, // size - 0x179: 0x43210, // onlanguagechange - 0x17a: 0x8607, // default - 0x17b: 0x1a03, // bdi - 0x17c: 0x4d30a, // onpagehide - 0x17d: 0x6907, // dirname - 0x17e: 0x21404, // type - 0x17f: 0x1f204, // form - 0x181: 0x28509, // oncanplay - 0x182: 0x6103, // dfn - 0x183: 0x46308, // tabindex - 0x186: 0x6502, // em - 0x187: 0x27404, // lang - 0x189: 0x39108, // dropzone - 0x18a: 0x4080a, // onkeypress - 0x18b: 0x23c08, // datetime - 0x18c: 0x16204, // cols - 0x18d: 0x1, // a - 0x18e: 0x4420c, // onloadeddata - 0x190: 0xa605, // audio - 0x192: 0x2e05, // tbody - 0x193: 0x22c06, // method - 0x195: 0xf404, // loop - 0x196: 0x29606, // iframe - 0x198: 0x2d504, // head - 0x19e: 0x5f108, // manifest - 0x19f: 0xb309, // autofocus - 0x1a0: 0x14904, // code - 0x1a1: 0x55906, // strong - 0x1a2: 0x30308, // multiple - 0x1a3: 0xc05, // param - 0x1a6: 0x21107, // enctype - 0x1a7: 0x5b304, // face - 0x1a8: 0xfd09, // plaintext - 0x1a9: 0x26e02, // h1 - 0x1aa: 0x59509, // onstalled - 0x1ad: 0x3d406, // script - 0x1ae: 0x2db06, // spacer - 0x1af: 0x55108, // onresize - 0x1b0: 0x4a20b, // onmouseover - 0x1b1: 0x5cc08, // onunload - 0x1b2: 0x56708, // onseeked - 0x1b4: 0x2140d, // typemustmatch - 0x1b5: 0x1cc06, // figure - 0x1b6: 0x4950a, // onmouseout - 0x1b7: 
0x25e03, // pre - 0x1b8: 0x50705, // width - 0x1b9: 0x19906, // sorted - 0x1bb: 0x5704, // nobr - 0x1be: 0x5302, // tt - 0x1bf: 0x1105, // align - 0x1c0: 0x3e607, // oninput - 0x1c3: 0x41807, // onkeyup - 0x1c6: 0x1c00c, // onafterprint - 0x1c7: 0x210e, // accept-charset - 0x1c8: 0x33c06, // itemid - 0x1c9: 0x3e809, // inputmode - 0x1cb: 0x53306, // strike - 0x1cc: 0x5a903, // sub - 0x1cd: 0x10505, // track - 0x1ce: 0x38605, // start - 0x1d0: 0xd608, // basefont - 0x1d6: 0x1aa06, // source - 0x1d7: 0x18206, // legend - 0x1d8: 0x2d405, // thead - 0x1da: 0x8c05, // tfoot - 0x1dd: 0x1ec06, // object - 0x1de: 0x6e05, // media - 0x1df: 0x1670a, // annotation - 0x1e0: 0x20d0b, // formenctype - 0x1e2: 0x3d208, // noscript - 0x1e4: 0x55505, // sizes - 0x1e5: 0x1fc0c, // autocomplete - 0x1e6: 0x9504, // span - 0x1e7: 0x9808, // noframes - 0x1e8: 0x24b06, // target - 0x1e9: 0x38f06, // ondrop - 0x1ea: 0x2b306, // applet - 0x1ec: 0x5a08, // reversed - 0x1f0: 0x2a907, // isindex - 0x1f3: 0x27008, // hreflang - 0x1f5: 0x2f302, // h5 - 0x1f6: 0x4f307, // address - 0x1fa: 0x2e103, // max - 0x1fb: 0xc30b, // placeholder - 0x1fc: 0x2f608, // textarea - 0x1fe: 0x4ad09, // onmouseup - 0x1ff: 0x3800b, // ondragstart -} - -const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + - "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + - "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + - "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + - "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + - "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + - "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + - "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + - "bjectforeignobjectformactionautocompleteerrorformenctypemust" + - "matchallengeformmethodformnovalidatetimeterformtargetheightm" + - "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + - "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + - "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + - "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + - "hangeondblclickondragendondragenterondragleaveondragoverondr" + - "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + - "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + - "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + - "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + - "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + - "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + - "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + - "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + - "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + - "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + - "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + - "mmarysupsvgsystemplate" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index 52f651f..0000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.3.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". 
-// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "isindex": true, - "li": true, - "link": true, - "listing": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "svg": - return element.Data == "foreignObject" - } - return false -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 94f4968..0000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. - - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. 
For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case ErrorToken: - return z.Err() - case TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case StartTagToken, EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? -// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a..0000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. - if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. 
- return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. - if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. -var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. 
corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index a50c04c..0000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. -// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - 
"Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', 
- "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": 
'\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": 
'\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": 
'\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', 
- "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": 
'\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - 
"emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - 
"harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - 
"ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - 
"mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": 
'\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', 
- "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": 
'\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": 
'\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - 
"xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', 
- "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. - // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', '\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": 
{'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 index d856139..0000000 --- a/vendor/golang.org/x/net/html/escape.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// These replacements permit compatibility with old numeric entities that -// assumed Windows-1252 encoding. -// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference -var replacementTable = [...]rune{ - '\u20AC', // First entry is what 0x80 should be replaced with. - '\u0081', - '\u201A', - '\u0192', - '\u201E', - '\u2026', - '\u2020', - '\u2021', - '\u02C6', - '\u2030', - '\u0160', - '\u2039', - '\u0152', - '\u008D', - '\u017D', - '\u008F', - '\u0090', - '\u2018', - '\u2019', - '\u201C', - '\u201D', - '\u2022', - '\u2013', - '\u2014', - '\u02DC', - '\u2122', - '\u0161', - '\u203A', - '\u0153', - '\u009D', - '\u017E', - '\u0178', // Last entry is 0x9F. - // 0x00->'\uFFFD' is handled programmatically. - // 0x0D->'\u000D' is a no-op. -} - -// unescapeEntity reads an entity like "<" from b[src:] and writes the -// corresponding "<" to b[dst:], returning the incremented dst and src cursors. -// Precondition: b[src] == '&' && dst <= src. -// attribute should be true if parsing an attribute value. -func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { - // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference - - // i starts at 1 because we already know that s[0] == '&'. - i, s := 1, b[src:] - - if len(s) <= 1 { - b[dst] = b[src] - return dst + 1, src + 1 - } - - if s[i] == '#' { - if len(s) <= 3 { // We need to have at least "&#.". - b[dst] = b[src] - return dst + 1, src + 1 - } - i++ - c := s[i] - hex := false - if c == 'x' || c == 'X' { - hex = true - i++ - } - - x := '\x00' - for i < len(s) { - c = s[i] - i++ - if hex { - if '0' <= c && c <= '9' { - x = 16*x + rune(c) - '0' - continue - } else if 'a' <= c && c <= 'f' { - x = 16*x + rune(c) - 'a' + 10 - continue - } else if 'A' <= c && c <= 'F' { - x = 16*x + rune(c) - 'A' + 10 - continue - } - } else if '0' <= c && c <= '9' { - x = 10*x + rune(c) - '0' - continue - } - if c != ';' { - i-- - } - break - } - - if i <= 3 { // No characters matched. - b[dst] = b[src] - return dst + 1, src + 1 - } - - if 0x80 <= x && x <= 0x9F { - // Replace characters from Windows-1252 with UTF-8 equivalents. - x = replacementTable[x-0x80] - } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { - // Replace invalid characters with the replacement character. - x = '\uFFFD' - } - - return dst + utf8.EncodeRune(b[dst:], x), src + i - } - - // Consume the maximum number of characters possible, with the - // consumed characters matching one of the named references. - - for i < len(s) { - c := s[i] - i++ - // Lower-cased characters are more common in entities, so we check for them first. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - continue - } - if c != ';' { - i-- - } - break - } - - entityName := string(s[1:i]) - if entityName == "" { - // No-op. 
- } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { - // No-op. - } else if x := entity[entityName]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + i - } else if x := entity2[entityName]; x[0] != 0 { - dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) - return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i - } else if !attribute { - maxLen := len(entityName) - 1 - if maxLen > longestEntityWithoutSemicolon { - maxLen = longestEntityWithoutSemicolon - } - for j := maxLen; j > 1; j-- { - if x := entity[entityName[:j]]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 - } - } - } - - dst1, src1 = dst+i, src+i - copy(b[dst:dst1], b[src:src1]) - return dst1, src1 -} - -// unescape unescapes b's entities in-place, so that "a<b" becomes "a': - esc = ">" - case '"': - // """ is shorter than """. - esc = """ - case '\r': - esc = " " - default: - panic("unrecognized escape character") - } - s = s[i+1:] - if _, err := w.WriteString(esc); err != nil { - return err - } - i = strings.IndexAny(s, escapedChars) - } - _, err := w.WriteString(s) - return err -} - -// EscapeString escapes special characters like "<" to become "<". It -// escapes only five such characters: <, >, &, ' and ". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func EscapeString(s string) string { - if strings.IndexAny(s, escapedChars) == -1 { - return s - } - var buf bytes.Buffer - escape(&buf, s) - return buf.String() -} - -// UnescapeString unescapes entities like "<" to become "<". It unescapes a -// larger range of entities than EscapeString escapes. For example, "á" -// unescapes to "á", as does "á" and "&xE1;". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func UnescapeString(s string) string { - for _, c := range s { - if c == '&' { - return string(unescape([]byte(s), false)) - } - } - return s -} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go deleted file mode 100644 index d3b3844..0000000 --- a/vendor/golang.org/x/net/html/foreign.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package html - -import ( - "strings" -) - -func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { - for i := range aa { - if newName, ok := nameMap[aa[i].Key]; ok { - aa[i].Key = newName - } - } -} - -func adjustForeignAttributes(aa []Attribute) { - for i, a := range aa { - if a.Key == "" || a.Key[0] != 'x' { - continue - } - switch a.Key { - case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", - "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": - j := strings.Index(a.Key, ":") - aa[i].Namespace = a.Key[:j] - aa[i].Key = a.Key[j+1:] - } - } -} - -func htmlIntegrationPoint(n *Node) bool { - if n.Type != ElementNode { - return false - } - switch n.Namespace { - case "math": - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { - return true - } - } - } - } - case "svg": - switch n.Data { - case "desc", "foreignObject", "title": - return true - } - } - return false -} - -func mathMLTextIntegrationPoint(n *Node) bool { - if n.Namespace != "math" { - return false - } - switch n.Data { - case "mi", "mo", "mn", "ms", "mtext": - return true - } - return false -} - -// Section 12.2.5.5. -var breakout = map[string]bool{ - "b": true, - "big": true, - "blockquote": true, - "body": true, - "br": true, - "center": true, - "code": true, - "dd": true, - "div": true, - "dl": true, - "dt": true, - "em": true, - "embed": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "hr": true, - "i": true, - "img": true, - "li": true, - "listing": true, - "menu": true, - "meta": true, - "nobr": true, - "ol": true, - "p": true, - "pre": true, - "ruby": true, - "s": true, - "small": true, - "span": true, - "strong": true, - "strike": true, - "sub": true, - "sup": true, - "table": true, - "tt": true, - "u": true, - "ul": true, - "var": true, -} - -// Section 12.2.5.5. 
-var svgTagNameAdjustments = map[string]string{ - "altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath", -} - -// Section 12.2.5.1 -var mathMLAttributeAdjustments = map[string]string{ - "definitionurl": "definitionURL", -} - -var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", -} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go deleted file mode 100644 index 26b657a..0000000 --- a/vendor/golang.org/x/net/html/node.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "golang.org/x/net/html/atom" -) - -// A NodeType is the type of a Node. -type NodeType uint32 - -const ( - ErrorNode NodeType = iota - TextNode - DocumentNode - ElementNode - CommentNode - DoctypeNode - scopeMarkerNode -) - -// Section 12.2.3.3 says "scope markers are inserted when entering applet -// elements, buttons, object elements, marquees, table cells, and table -// captions, and are used to prevent formatting from 'leaking'". -var scopeMarker = Node{Type: scopeMarkerNode} - -// A Node consists of a NodeType and some Data (tag name for element nodes, -// content for text) and are part of a tree of Nodes. Element nodes may also -// have a Namespace and contain a slice of Attributes. Data is unescaped, so -// that it looks like "a 0 { - return (*s)[i-1] - } - return nil -} - -// index returns the index of the top-most occurrence of n in the stack, or -1 -// if n is not present. -func (s *nodeStack) index(n *Node) int { - for i := len(*s) - 1; i >= 0; i-- { - if (*s)[i] == n { - return i - } - } - return -1 -} - -// insert inserts a node at the given index. -func (s *nodeStack) insert(i int, n *Node) { - (*s) = append(*s, nil) - copy((*s)[i+1:], (*s)[i:]) - (*s)[i] = n -} - -// remove removes a node from the stack. It is a no-op if n is not present. -func (s *nodeStack) remove(n *Node) { - i := s.index(n) - if i == -1 { - return - } - copy((*s)[i:], (*s)[i+1:]) - j := len(*s) - 1 - (*s)[j] = nil - *s = (*s)[:j] -} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go deleted file mode 100644 index be4b2bf..0000000 --- a/vendor/golang.org/x/net/html/parse.go +++ /dev/null @@ -1,2094 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "errors" - "fmt" - "io" - "strings" - - a "golang.org/x/net/html/atom" -) - -// A parser implements the HTML5 parsing algorithm: -// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction -type parser struct { - // tokenizer provides the tokens for the parser. - tokenizer *Tokenizer - // tok is the most recently read token. - tok Token - // Self-closing tags like
are treated as start tags, except that - // hasSelfClosingToken is set while they are being processed. - hasSelfClosingToken bool - // doc is the document root element. - doc *Node - // The stack of open elements (section 12.2.3.2) and active formatting - // elements (section 12.2.3.3). - oe, afe nodeStack - // Element pointers (section 12.2.3.4). - head, form *Node - // Other parsing state flags (section 12.2.3.5). - scripting, framesetOK bool - // im is the current insertion mode. - im insertionMode - // originalIM is the insertion mode to go back to after completing a text - // or inTableText insertion mode. - originalIM insertionMode - // fosterParenting is whether new elements should be inserted according to - // the foster parenting rules (section 12.2.5.3). - fosterParenting bool - // quirks is whether the parser is operating in "quirks mode." - quirks bool - // fragment is whether the parser is parsing an HTML fragment. - fragment bool - // context is the context element when parsing an HTML fragment - // (section 12.4). - context *Node -} - -func (p *parser) top() *Node { - if n := p.oe.top(); n != nil { - return n - } - return p.doc -} - -// Stop tags for use in popUntil. These come from section 12.2.3.2. -var ( - defaultScopeStopTags = map[string][]a.Atom{ - "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, - "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, - "svg": {a.Desc, a.ForeignObject, a.Title}, - } -) - -type scope int - -const ( - defaultScope scope = iota - listItemScope - buttonScope - tableScope - tableRowScope - tableBodyScope - selectScope -) - -// popUntil pops the stack of open elements at the highest element whose tag -// is in matchTags, provided there is no higher element in the scope's stop -// tags (as defined in section 12.2.3.2). It returns whether or not there was -// such an element. If there was not, popUntil leaves the stack unchanged. -// -// For example, the set of stop tags for table scope is: "html", "table". If -// the stack was: -// ["html", "body", "font", "table", "b", "i", "u"] -// then popUntil(tableScope, "font") would return false, but -// popUntil(tableScope, "i") would return true and the stack would become: -// ["html", "body", "font", "table", "b"] -// -// If an element's tag is in both the stop tags and matchTags, then the stack -// will be popped and the function returns true (provided, of course, there was -// no higher element in the stack that was also in the stop tags). For example, -// popUntil(tableScope, "table") returns true and leaves: -// ["html", "body", "font"] -func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { - if i := p.indexOfElementInScope(s, matchTags...); i != -1 { - p.oe = p.oe[:i] - return true - } - return false -} - -// indexOfElementInScope returns the index in p.oe of the highest element whose -// tag is in matchTags that is in scope. If no matching element is in scope, it -// returns -1. -func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - if p.oe[i].Namespace == "" { - for _, t := range matchTags { - if t == tagAtom { - return i - } - } - switch s { - case defaultScope: - // No-op. 
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. -// If exceptions are specified, nodes with that name will not be popped off. -func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.5.3, "foster parenting". -func (p *parser) fosterParent(n *Node) { - var table, parent, prev *Node - var i int - for i = len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == a.Table { - table = p.oe[i] - break - } - } - - if table == nil { - // The foster parent is the html element. - parent = p.oe[0] - } else { - parent = table.Parent - } - if parent == nil { - parent = p.oe[i-1] - } - - if table != nil { - prev = table.PrevSibling - } else { - prev = parent.LastChild - } - if prev != nil && prev.Type == TextNode && n.Type == TextNode { - prev.Data += n.Data - return - } - - parent.InsertBefore(n, table) -} - -// addText adds text to the preceding node if it is a text node, or else it -// calls addChild with a new text node. 
-func (p *parser) addText(text string) { - if text == "" { - return - } - - if p.shouldFosterParent() { - p.fosterParent(&Node{ - Type: TextNode, - Data: text, - }) - return - } - - t := p.top() - if n := t.LastChild; n != nil && n.Type == TextNode { - n.Data += text - return - } - p.addChild(&Node{ - Type: TextNode, - Data: text, - }) -} - -// addElement adds a child element based on the current token. -func (p *parser) addElement() { - p.addChild(&Node{ - Type: ElementNode, - DataAtom: p.tok.DataAtom, - Data: p.tok.Data, - Attr: p.tok.Attr, - }) -} - -// Section 12.2.3.3. -func (p *parser) addFormattingElement() { - tagAtom, attr := p.tok.DataAtom, p.tok.Attr - p.addElement() - - // Implement the Noah's Ark clause, but with three per family instead of two. - identicalElements := 0 -findIdenticalElements: - for i := len(p.afe) - 1; i >= 0; i-- { - n := p.afe[i] - if n.Type == scopeMarkerNode { - break - } - if n.Type != ElementNode { - continue - } - if n.Namespace != "" { - continue - } - if n.DataAtom != tagAtom { - continue - } - if len(n.Attr) != len(attr) { - continue - } - compareAttributes: - for _, t0 := range n.Attr { - for _, t1 := range attr { - if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { - // Found a match for this attribute, continue with the next attribute. - continue compareAttributes - } - } - // If we get here, there is no attribute that matches a. - // Therefore the element is not identical to the new one. - continue findIdenticalElements - } - - identicalElements++ - if identicalElements >= 3 { - p.afe.remove(n) - } - } - - p.afe = append(p.afe, p.top()) -} - -// Section 12.2.3.3. -func (p *parser) clearActiveFormattingElements() { - for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { - return - } - } -} - -// Section 12.2.3.3. -func (p *parser) reconstructActiveFormattingElements() { - n := p.afe.top() - if n == nil { - return - } - if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { - return - } - i := len(p.afe) - 1 - for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { - if i == 0 { - i = -1 - break - } - i-- - n = p.afe[i] - } - for { - i++ - clone := p.afe[i].clone() - p.addChild(clone) - p.afe[i] = clone - if i == len(p.afe)-1 { - break - } - } -} - -// Section 12.2.4. -func (p *parser) acknowledgeSelfClosingTag() { - p.hasSelfClosingToken = false -} - -// An insertion mode (section 12.2.3.1) is the state transition function from -// a particular state in the HTML5 parser's state machine. It updates the -// parser's fields depending on parser.tok (where ErrorToken means EOF). -// It returns whether the token was consumed. -type insertionMode func(*parser) bool - -// setOriginalIM sets the insertion mode to return to after completing a text or -// inTableText insertion mode. -// Section 12.2.3.1, "using the rules for". -func (p *parser) setOriginalIM() { - if p.originalIM != nil { - panic("html: bad parser state: originalIM was set twice") - } - p.originalIM = p.im -} - -// Section 12.2.3.1, "reset the insertion mode". 
-func (p *parser) resetInsertionMode() { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if i == 0 && p.context != nil { - n = p.context - } - - switch n.DataAtom { - case a.Select: - p.im = inSelectIM - case a.Td, a.Th: - p.im = inCellIM - case a.Tr: - p.im = inRowIM - case a.Tbody, a.Thead, a.Tfoot: - p.im = inTableBodyIM - case a.Caption: - p.im = inCaptionIM - case a.Colgroup: - p.im = inColumnGroupIM - case a.Table: - p.im = inTableIM - case a.Head: - p.im = inBodyIM - case a.Body: - p.im = inBodyIM - case a.Frameset: - p.im = inFramesetIM - case a.Html: - p.im = beforeHeadIM - default: - continue - } - return - } - p.im = inBodyIM -} - -const whitespace = " \t\r\n\f" - -// Section 12.2.5.4.1. -func initialIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - n, quirks := parseDoctype(p.tok.Data) - p.doc.AppendChild(n) - p.quirks = quirks - p.im = beforeHTMLIM - return true - } - p.quirks = true - p.im = beforeHTMLIM - return false -} - -// Section 12.2.5.4.2. -func beforeHTMLIM(p *parser) bool { - switch p.tok.Type { - case DoctypeToken: - // Ignore the token. - return true - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - if p.tok.DataAtom == a.Html { - p.addElement() - p.im = beforeHeadIM - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - } - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false -} - -// Section 12.2.5.4.3. -func beforeHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Head: - p.addElement() - p.head = p.top() - p.im = inHeadIM - return true - case a.Html: - return inBodyIM(p) - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.4. -func inHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. 
- p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: - p.addElement() - p.oe.pop() - p.acknowledgeSelfClosingTag() - return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: - p.addElement() - p.setOriginalIM() - p.im = textIM - return true - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head: - n := p.oe.pop() - if n.DataAtom != a.Head { - panic("html: bad parser state: element not found, in the in-head insertion mode") - } - p.im = afterHeadIM - return true - case a.Body, a.Html, a.Br: - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.6. -func afterHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Body: - p.addElement() - p.framesetOK = false - p.im = inBodyIM - return true - case a.Frameset: - p.addElement() - p.im = inFramesetIM - return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: - p.oe = append(p.oe, p.head) - defer p.oe.remove(p.head) - return inHeadIM(p) - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Body, a.Html, a.Br: - // Drop down to creating an implied tag. - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) - p.framesetOK = true - return false -} - -// copyAttributes copies attributes of src not found on dst to dst. -func copyAttributes(dst *Node, src Token) { - if len(src.Attr) == 0 { - return - } - attr := map[string]string{} - for _, t := range dst.Attr { - attr[t.Key] = t.Val - } - for _, t := range src.Attr { - if _, ok := attr[t.Key]; !ok { - dst.Attr = append(dst.Attr, t) - attr[t.Key] = t.Val - } - } -} - -// Section 12.2.5.4.7. -func inBodyIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - d := p.tok.Data - switch n := p.oe.top(); n.DataAtom { - case a.Pre, a.Listing: - if n.FirstChild == nil { - // Ignore a newline at the start of a
<pre> block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form == nil {
-				p.popUntil(buttonScope, a.P)
-				p.addElement()
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A)
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr)
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Isindex:
-			if p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			action := ""
-			prompt := "This is a searchable index. Enter search keywords: "
-			attr := []Attribute{{Key: "name", Val: "isindex"}}
-			for _, t := range p.tok.Attr {
-				switch t.Key {
-				case "action":
-					action = t.Val
-				case "name":
-					// Ignore the attribute.
-				case "prompt":
-					prompt = t.Val
-				default:
-					attr = append(attr, t)
-				}
-			}
-			p.acknowledgeSelfClosingTag()
-			p.popUntil(buttonScope, a.P)
-			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
-			if action != "" {
-				p.form.Attr = []Attribute{{Key: "action", Val: action}}
-			}
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
-			p.addText(prompt)
-			p.addChild(&Node{
-				Type:     ElementNode,
-				DataAtom: a.Input,
-				Data:     a.Input.String(),
-				Attr:     attr,
-			})
-			p.oe.pop()
-			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Iframe:
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Noembed, a.Noscript:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			node := p.form
-			p.form = nil
-			i := p.indexOfElementInScope(defaultScope, a.Form)
-			if node == nil || i == -1 || p.oe[i] != node {
-				// Ignore the token.
-				return true
-			}
-			p.generateImpliedEndTags()
-			p.oe.remove(node)
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	}
-
-	return true
-}
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-4. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 5. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom)
-			return
-		}
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Steps 9-10. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 11-12. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 13. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
-			x--
-			node = p.oe[x]
-			// Step 13.4 - 13.5.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 13.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 13.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 13.10.
-			lastNode = node
-		}
-
-		// Step 14. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 15-17. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 18. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 19. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
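The comment block above identifies this as the WHATWG "adoption agency" algorithm. For orientation, here is a minimal sketch of the behaviour it implements, written against the public API of golang.org/x/net/html (which this deleted vendor file appears to be a copy of; after the move to go mod the package arrives as a module dependency instead of a vendored tree):

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </b> arrives while <i> is still open; the adoption agency algorithm
	// reparents nodes so the resulting tree nests correctly.
	doc, err := html.Parse(strings.NewReader("<p><b>one<i>two</b>three</i></p>"))
	if err != nil {
		panic(err)
	}
	// Renders roughly: ...<p><b>one<i>two</i></b><i>three</i></p>...
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}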
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == tagAtom {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
-
-// Section 12.2.5.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a <textarea> block.
-                            
-                            
- - -
- -
- - -
-
- - -
-
- - -
- - - {{end}} - - {{else}} - - {{if eq .Tab "logs"}} -
-
- - - - - - - - - - - {{range .Logs}} - - - - - - - {{end}} -
日志文件管理
文件大小时间操作
{{.Path}}{{FormatByte .Size}}{{dateformat .ModTime "2006-01-02 15:04:05"}} - - {{/*logs/dochub.log日志文件禁止删除,否则程序无法写入日志*/}} - {{if ne .Path "logs/dochub.log"}} | - - {{end}} -
-
-
- {{end}} - -
- {{if or (eq $.Tab "oss") (eq $.Tab "bos") (eq $.Tab "local") (eq $.Tab "qiniu") (eq $.Tab "cos")}} - - - {{if eq $.Tab "local"}} -
如果选择本地存储,则不需要进行任何配置,文档存储到程序目录下的 ./store 文件夹,请确保该文件夹拥有读写权限
- {{end}} - - {{end}} - {{range .Configs}} - {{if eq $.Tab .Category}} -
- - {{if eq .InputType "bool"}} -
- - -
- {{else if eq .InputType "number"}} - - {{else if eq .InputType "textarea"}} - - {{else}} - - {{end}} -
{{.Description}}
-
- {{end}} - {{end}} - - {{if eq $.Tab "email"}} -
每次变更邮箱配置,程序将自动发送一封测试邮件到测试邮箱以验证配置是否正确,配置不正确,则不会更改原配置
- {{end}} - {{if eq $.Tab "oss"}} - 测试OSS连通 -
请先保存更改再进行测试
- {{end}} - {{if eq $.Tab "elasticsearch"}} - - 重建全量索引 -
-
- 索引数量:{{.Count}} -    |    - 连通状态: - {{if .ErrES}} - {{.ErrES}} - {{else}} - 成功 - {{end}} - -
- {{end}} - {{if eq $.Tab "depend"}} -
-

重要:如果需要 sudo 权限才能执行命令,则在命令前加上sudo。如pdf2svg 这个命令,修改为 sudo pdf2svg

-
- {{end}} -
- - - - {{end}} - - - - - - +
+

系统设置

+
+
+ + + + + {{if eq .Tab "default"}} + {{with .Sys}} +
+ +
+ + + + +
+
+ + +
+ + +
+ + +
+ +
+ + 开启 + 关闭 +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+
+ + +
+
+ +
+
+ + +
+
+ +
+
+ + +
+
+ +
+
+ + +
+
+
+
+ + +
+
+
+
+ + +
+
+ + + {{/*
*/}} + {{/*
*/}} + {{/**/}} + {{/**/}} + {{/*
*/}} + {{/*
*/}} + +
+
+ + +
+
+ +
+
+ + +
+
+ +
+
+ + +
+
+ + {{/*
*/}} + {{/*
*/}} + {{/**/}} + {{/**/}} + {{/*
*/}} + {{/*
*/}} + +
+
+ + +
+
+ + + + + + + +
+
+ + +
+
+ +
+
+ + +
+
+
+
+ + +
+
+ +
+
+ + +
+
+
+
+ + +
+
+
+
+ + +
+
+ + + + + +
+
+ + +
+
+ +
+
+ + +
+
+
+
+ + +
+
+ + + {{/*相关文档功能暂时还没开发,先注释掉*/}} + {{/*
*/}} + {{/*
*/}} + {{/**/}} + {{/**/}} + {{/*
*/}} + {{/*
*/}} + + + + +
+ + +
+
+ + +
+ +
+ + +
+
+ + +
+
+ + +
+ +
+ {{end}} + + {{else}} + + {{if eq .Tab "logs"}} +
+
+ + + + + + + + + + + {{range .Logs}} + + + + + + + {{end}} +
日志文件管理
文件大小时间操作
{{.Path}}{{FormatByte .Size}}{{dateformat .ModTime "2006-01-02 15:04:05"}} + + {{/*logs/dochub.log日志文件禁止删除,否则程序无法写入日志*/}} + {{if ne .Path "logs/dochub.log"}} | + + {{end}} +
+
+
+ {{end}} + +
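The logs tab above ranges over .Logs entries exposing Path, Size and ModTime, and deliberately offers no delete action for logs/dochub.log so the program can keep writing to it. The controller feeding that table is not part of this hunk; the sketch below is only a hypothetical illustration of how such a listing could be assembled with the standard library (listLogs and logFile are made-up names, not DocHub's actual code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// logFile mirrors the fields the template reads: Path, Size and ModTime.
type logFile struct {
	Path    string
	Size    int64
	ModTime time.Time
}

// listLogs walks dir and collects every regular file found under it.
func listLogs(dir string) (files []logFile, err error) {
	err = filepath.Walk(dir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil || info.IsDir() {
			return walkErr
		}
		files = append(files, logFile{Path: path, Size: info.Size(), ModTime: info.ModTime()})
		return nil
	})
	return
}

func main() {
	files, err := listLogs("logs")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, f := range files {
		deletable := f.Path != "logs/dochub.log" // the main log file must remain writable
		fmt.Println(f.Path, f.Size, f.ModTime.Format("2006-01-02 15:04:05"), deletable)
	}
}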
+ {{if or (eq $.Tab "oss") (eq $.Tab "bos") (eq $.Tab "local") (eq $.Tab "qiniu") (eq $.Tab "cos")}} + + + {{if eq $.Tab "local"}} +
如果选择本地存储,则不需要进行任何配置,文档存储到程序目录下的 ./store 文件夹,请确保该文件夹拥有读写权限
+ {{end}} + + {{end}} + {{range .Configs}} + {{if eq $.Tab .Category}} +
+ + {{if eq .InputType "bool"}} +
+ + +
+ {{else if eq .InputType "number"}} + + {{else if eq .InputType "textarea"}} + + {{else}} + + {{end}} +
{{.Description}}
+
+ {{end}} + {{end}} + + {{if eq $.Tab "email"}} +
每次变更邮箱配置,程序将自动发送一封测试邮件到测试邮箱以验证配置是否正确,配置不正确,则不会更改原配置
+ {{end}} + {{if eq $.Tab "oss"}} + 测试OSS连通 +
请先保存更改再进行测试
+ {{end}} + {{if eq $.Tab "elasticsearch"}} + + 重建全量索引 +
+
+ 索引数量:{{.Count}} +    |    + 连通状态: + {{if .ErrES}} + {{.ErrES}} + {{else}} + 成功 + {{end}} + +
+ {{end}} + {{if eq $.Tab "depend"}} +
+

重要:如果需要 sudo 权限才能执行命令,则在命令前加上sudo。如pdf2svg 这个命令,修改为 sudo pdf2svg

+
+ {{end}} +
+ + + + {{end}} + + +
+
+ +
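The "depend" tab above tells administrators to prefix a command with sudo when elevated rights are needed, e.g. pdf2svg becomes sudo pdf2svg. DocHub's actual converter code is not shown in this patch; the sketch below only illustrates why such a configured string has to be split into program and arguments before being executed (runConfiguredTool and the sample pdf2svg arguments are hypothetical):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// runConfiguredTool splits a configured command string such as "sudo pdf2svg"
// into program + leading arguments and runs it with the extra args appended.
func runConfiguredTool(configured string, args ...string) error {
	parts := strings.Fields(configured) // e.g. ["sudo", "pdf2svg"]
	if len(parts) == 0 {
		return fmt.Errorf("empty command")
	}
	out, err := exec.Command(parts[0], append(parts[1:], args...)...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s failed: %v (output: %s)", configured, err, out)
	}
	return nil
}

func main() {
	// Convert page 1 of a PDF to SVG through a sudo-prefixed command.
	if err := runConfiguredTool("sudo pdf2svg", "input.pdf", "page-1.svg", "1"); err != nil {
		fmt.Println(err)
	}
}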
From 632ac0ffc6899454d1f9a938e4ca44f0afb9b736 Mon Sep 17 00:00:00 2001 From: faker Date: Mon, 25 Nov 2019 12:27:20 +0800 Subject: [PATCH 6/7] =?UTF-8?q?=E6=8E=A5=E5=85=A5ldap?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../HomeControllers/IndexController.go | 1 - controllers/HomeControllers/UserController.go | 118 +++++- go.mod | 12 + main.go | 5 +- models/SysModel.go | 14 +- pkg/auth/ldap.go | 43 ++ pkg/tplfunc/tplfunc.go | 11 + views/Home/default/Index/index.html | 386 +++++++++--------- 8 files changed, 375 insertions(+), 215 deletions(-) create mode 100644 pkg/auth/ldap.go create mode 100644 pkg/tplfunc/tplfunc.go diff --git a/controllers/HomeControllers/IndexController.go b/controllers/HomeControllers/IndexController.go index 1ce0bb2..4f00002 100644 --- a/controllers/HomeControllers/IndexController.go +++ b/controllers/HomeControllers/IndexController.go @@ -15,7 +15,6 @@ type IndexController struct { } func (this *IndexController) Get() { - //获取横幅 this.Data["Banners"], _, _ = models.GetList(models.GetTableBanner(), 1, 100, orm.NewCondition().And("status", 1), "Sort") diff --git a/controllers/HomeControllers/UserController.go b/controllers/HomeControllers/UserController.go index 511b543..4613998 100644 --- a/controllers/HomeControllers/UserController.go +++ b/controllers/HomeControllers/UserController.go @@ -8,12 +8,14 @@ import ( "time" "github.com/astaxie/beego" + "github.com/astaxie/beego/logs" "github.com/astaxie/beego/orm" "github.com/astaxie/beego/validation" "DocHub/helper" "DocHub/helper/conv" "DocHub/models" + "DocHub/pkg/auth" ) type UserController struct { @@ -270,33 +272,113 @@ func (this *UserController) Login() { Email, Password string } - this.ParseForm(&post) - valid := validation.Validation{} - res := valid.Email(post.Email, "Email") - if !res.Ok { - this.ResponseJson(false, "登录失败,邮箱格式不正确") + err := this.ParseForm(&post) + if err != nil { + logs.Error("[UserController->Login] parse form err: %v", err) } - ModelUser := models.NewUser() - users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MD5Crypt(post.Password)) - if rows == 0 || err != nil { + sysM := models.Sys{} + sysConf, err := sysM.Get() + if err != nil { + logs.Error("[UserController->Login] get sys err: %v", err) + this.ResponseJson(false, "系统配置参数有误") + return + } + if sysConf.IsOpenLdap { + ldapConf := auth.LdapConfig{ + Base: sysConf.LdapBase, + Host: sysConf.LdapHost, + Port: sysConf.LdapPort, + BindDN: sysConf.LdapBindDN, + BindPwd: sysConf.LdapBindPwd, + } + + // ldap 逻辑下, Email 其实是用户名,真正的email地址通知认证后可以拿到 + ok, ldapUser, _ := auth.IsAuthorized(post.Email, post.Password, ldapConf) + if !ok { + this.ResponseJson(false, "LDAP 认证失败,请确认") + return + } + + logs.Debug("[UserController->Login] ldapUser: %#v", ldapUser) + var email string + if v, ok := ldapUser["mail"]; ok { + email = v + + } + if email == "" { + logs.Error("[UserController->Login] ldap user info has wrong, data: %#v", ldapUser) + this.ResponseJson(false, "登录失败,LDAP 邮箱配置有误") + return + } + + // ldap 用户登陆或自动注册 + ModelUser := models.NewUser() + users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email` = ? 
AND u.`password` = ?", email, helper.MD5Crypt(post.Password)) if err != nil { helper.Logger.Error(err.Error()) + this.ResponseJson(false, "登录失败,数据库记录不存在") + return } - this.ResponseJson(false, "登录失败,邮箱或密码不正确") - } - user := users[0] - this.IsLogin = helper.Interface2Int(user["Id"]) + // 需要新增 + if rows == 0 { + // 注册 + err, uid := models.NewUser().Reg( + email, + post.Email, + post.Password, + post.Password, + "暂无...", + ) + if err != nil { + this.ResponseJson(false, err.Error()) + return + } + this.IsLogin = uid + } else { + user := users[0] + this.IsLogin = helper.Interface2Int(user["Id"]) + } - if this.IsLogin > 0 { - //查询用户有没有被封禁 - if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { //被封禁了 - this.ResponseJson(false, "登录失败,您的账号已被管理员禁用") + if this.IsLogin > 0 { + //查询用户有没有被封禁 + if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { //被封禁了 + this.ResponseJson(false, "登录失败,您的账号已被管理员禁用") + } + this.BaseController.SetCookieLogin(this.IsLogin) + this.ResponseJson(true, "登录成功") + } + } else { + + valid := validation.Validation{} + res := valid.Email(post.Email, "Email") + if !res.Ok { + this.ResponseJson(false, "登录失败,邮箱格式不正确") + } + + ModelUser := models.NewUser() + users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MD5Crypt(post.Password)) + if rows == 0 || err != nil { + if err != nil { + helper.Logger.Error(err.Error()) + } + this.ResponseJson(false, "登录失败,邮箱或密码不正确") + } + + user := users[0] + this.IsLogin = helper.Interface2Int(user["Id"]) + + if this.IsLogin > 0 { + //查询用户有没有被封禁 + if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { //被封禁了 + this.ResponseJson(false, "登录失败,您的账号已被管理员禁用") + } + this.BaseController.SetCookieLogin(this.IsLogin) + this.ResponseJson(true, "登录成功") } - this.BaseController.SetCookieLogin(this.IsLogin) - this.ResponseJson(true, "登录成功") } + this.ResponseJson(false, "登录失败,未知错误!") } diff --git a/go.mod b/go.mod index dca0551..bb53d53 100644 --- a/go.mod +++ b/go.mod @@ -7,12 +7,24 @@ require ( github.com/TruthHun/gotil v0.0.0-20191003091818-17b80aad8a45 github.com/adamzy/cedar-go v0.0.0-20170805034717-80a9c64b256d // indirect github.com/astaxie/beego v1.12.0 + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/disintegration/imaging v1.6.2 + github.com/go-ldap/ldap/v3 v3.1.3 // indirect github.com/go-sql-driver/mysql v1.4.1 github.com/huichen/sego v0.0.0-20180617034105-3f3c8a8cfacc github.com/internet-dev/CloudStore v0.0.3 + github.com/internet-dev/go-ldap-client v0.0.4 + github.com/issue9/assert v1.3.4 // indirect + github.com/jtblin/go-ldap-client v0.0.0-20170223121919-b73f66626b33 + github.com/satori/go.uuid v1.2.0 // indirect + github.com/smartystreets/goconvey v1.6.4 // indirect github.com/tdewolff/minify v2.3.6+incompatible github.com/tdewolff/parse v2.3.4+incompatible // indirect + github.com/tdewolff/test v1.0.5 // indirect + google.golang.org/appengine v1.6.5 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df + gopkg.in/ini.v1 v1.51.0 // indirect + gopkg.in/ldap.v2 v2.5.1 // indirect rsc.io/pdf v0.1.1 ) diff --git a/main.go b/main.go index f725d7e..835adee 100644 --- a/main.go +++ b/main.go @@ -3,16 +3,15 @@ package main import ( "fmt" - _ "DocHub/routers" - "github.com/astaxie/beego" "github.com/astaxie/beego/logs" + _ "DocHub/pkg/tplfunc" + _ "DocHub/routers" "DocHub/controllers/HomeControllers" "DocHub/helper" 
"DocHub/models" - ) //初始化函数 diff --git a/models/SysModel.go b/models/SysModel.go index 15cd77a..1584fe4 100644 --- a/models/SysModel.go +++ b/models/SysModel.go @@ -4,6 +4,8 @@ import ( "fmt" "strings" + "github.com/astaxie/beego/logs" + "github.com/astaxie/beego/orm" ) @@ -61,6 +63,16 @@ func GetTableSys() string { return getTable("sys") } +func IsOpenLdap() bool { + sysM := Sys{} + sys, err := sysM.Get() + if err != nil { + logs.Error("[IsOpenLdap] get sys err: %v", err) + return false + } + return sys.IsOpenLdap +} + //获取系统配置信息。注意:系统配置信息的记录只有一条,而且id主键为1 //@return sys 返回的系统信息 //@return err 错误 @@ -81,7 +93,7 @@ func (this *Sys) UpdateGlobalConfig() { //@param field 需要查询的字段 //@return sys 系统配置信息 func (this *Sys) GetByField(field string) (sys Sys) { - orm.NewOrm().QueryTable(GetTableSys()).Filter("Id", 1).One(&sys, field) + _ = orm.NewOrm().QueryTable(GetTableSys()).Filter("Id", 1).One(&sys, field) return } diff --git a/pkg/auth/ldap.go b/pkg/auth/ldap.go new file mode 100644 index 0000000..9b8a3e5 --- /dev/null +++ b/pkg/auth/ldap.go @@ -0,0 +1,43 @@ +package auth + +import ( + "github.com/astaxie/beego/logs" + "github.com/internet-dev/go-ldap-client" +) + +type LdapConfig struct { + Base string + Host string + Port int + BindDN string + BindPwd string +} + +func IsAuthorized(userName, password string, conf LdapConfig) (bool, map[string]string, error) { + logs.Debug("[IsAuthorized] LdapConfig: %#v", conf) + client := &ldap.LDAPClient{ + Base: conf.Base, + Host: conf.Host, + Port: conf.Port, + UseSSL: false, + BindDN: conf.BindDN, + BindPassword: conf.BindPwd, + UserFilter: "(uid=%s)", + GroupFilter: "", + Attributes: []string{"givenName", "sn", "mail", "uid"}, + } + // It is the responsibility of the caller to close the connection + defer client.Close() + + ok, user, err := client.Authenticate(userName, password) + if err != nil { + logs.Error("[IsAuthorized] Error authenticating user %s: %+v", userName, err) + return false, nil, err + } + if !ok { + logs.Error("Authenticating failed for user %s", userName) + return false, nil, nil + } + + return ok, user, nil +} diff --git a/pkg/tplfunc/tplfunc.go b/pkg/tplfunc/tplfunc.go new file mode 100644 index 0000000..bac45dc --- /dev/null +++ b/pkg/tplfunc/tplfunc.go @@ -0,0 +1,11 @@ +package tplfunc + +import ( + "github.com/astaxie/beego" + + "DocHub/models" +) + +func init() { + beego.AddFuncMap("isOpenLdap", models.IsOpenLdap) +} diff --git a/views/Home/default/Index/index.html b/views/Home/default/Index/index.html index 0069bb3..e860fd6 100644 --- a/views/Home/default/Index/index.html +++ b/views/Home/default/Index/index.html @@ -1,192 +1,194 @@ -
-
- -
-
-
-
收录文档
- {{.Sys.CntDoc}} -
-
-
注册用户
- {{.Sys.CntUser}} -
-
- - {{if gt .LoginUid 0}} -
- {{with .User}} -
-
- {{.Username}} -
- -
- {{if (DoesSign (Interface2Int $.LoginUid))}} - - {{else}} - 今日签到 - {{end}} -
-
-
-
-
财富
- {{.Coin}} -
-
-
文档
- {{.Document}} -
-
-
收藏
- {{.Collect}} -
-
- {{end}} -
- {{else}} - -
-
-
-
- -
-
- -
- {{.xsrfdata}} - - - {{/**/}} -
-
-
- - {{end}} - -
- -
- -
- -
- - -
- -
-
-
- {{range $index,$val:=.Chanels}} - {{if lt $index 4}} -
-
{{$val.Title}}
-
- {{range $idx,$item:=$.Cates}} - {{if eq $item.Pid $val.Id}}{{$item.Title}}{{end}} - {{end}} -
-
- {{end}} - {{end}} -
- -
-
- -
- -
-
-
-
热门推荐
-
- - - -
- -
-
-
-
- - {{range $index,$val:=.Chanels}} - -
-
-
{{.Title}} - 更多>> -
-
- - {{.Title}} - -
- -
-
-
-
- {{if eq (CalcInt $index "%" 2) 0}} -
-
- {{end}} - - {{end}} - - - -
- -
- +
+
+ +
+
+
+
收录文档
+ {{.Sys.CntDoc}} +
+
+
注册用户
+ {{.Sys.CntUser}} +
+
+ + {{if gt .LoginUid 0}} +
+ {{with .User}} +
+
+ {{.Username}} +
+ +
+ {{if (DoesSign (Interface2Int $.LoginUid))}} + + {{else}} + 今日签到 + {{end}} +
+
+
+
+
财富
+ {{.Coin}} +
+
+
文档
+ {{.Document}} +
+
+
收藏
+ {{.Collect}} +
+
+ {{end}} +
+ {{else}} + +
+
+
+
+ +
+
+ +
+ {{.xsrfdata}} + + {{if not isOpenLdap}} + + {{end}} + {{/**/}} +
+
+
+ + {{end}} + +
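In the sidebar block above, the self-registration link is wrapped in {{if not isOpenLdap}}, so it disappears once LDAP login is switched on. That works because the patch registers a niladic template helper with beego.AddFuncMap("isOpenLdap", models.IsOpenLdap) in the new pkg/tplfunc package. A self-contained sketch of the same pattern using the standard html/template package — isOpenLdap below is a stand-in for the real helper, which reads the Sys record, and the link target is illustrative:

package main

import (
	"html/template"
	"os"
)

// isOpenLdap stands in for models.IsOpenLdap.
func isOpenLdap() bool { return true }

func main() {
	// A niladic function registered in the FuncMap may appear as an argument,
	// so {{if not isOpenLdap}} invokes it and negates the result.
	const tpl = `{{if not isOpenLdap}}<a href="#">Sign up</a>{{else}}LDAP login only{{end}}`

	t := template.Must(template.New("sidebar").
		Funcs(template.FuncMap{"isOpenLdap": isOpenLdap}).
		Parse(tpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: LDAP login only
}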
+ +
+ +
+ +
+ + +
+ +
+
+
+ {{range $index,$val:=.Chanels}} + {{if lt $index 4}} +
+
{{$val.Title}}
+
+ {{range $idx,$item:=$.Cates}} + {{if eq $item.Pid $val.Id}}{{$item.Title}}{{end}} + {{end}} +
+
+ {{end}} + {{end}} +
+ +
+
+ +
+ +
+
+
+
热门推荐
+
+ + + +
+ +
+
+
+
+ + {{range $index,$val:=.Chanels}} + +
+
+
{{.Title}} + 更多>> +
+
+ + {{.Title}} + +
+ +
+
+
+
+ {{if eq (CalcInt $index "%" 2) 0}} +
+
+ {{end}} + + {{end}} + + + +
+ +
+

From 319f42c28ee1aab1e8c9d945bb52f49b9fe2712b Mon Sep 17 00:00:00 2001
From: faker
Date: Mon, 25 Nov 2019 14:25:04 +0800
Subject: [PATCH 7/7] =?UTF-8?q?ldap=E8=87=AA=E5=8A=A8=E6=B3=A8=E5=86=8C?=
 =?UTF-8?q?=E5=8A=A0=E7=BB=9F=E8=AE=A1?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 controllers/HomeControllers/UserController.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/controllers/HomeControllers/UserController.go b/controllers/HomeControllers/UserController.go
index 4613998..fcdb955 100644
--- a/controllers/HomeControllers/UserController.go
+++ b/controllers/HomeControllers/UserController.go
@@ -336,6 +336,7 @@ func (this *UserController) Login() {
 				return
 			}
 			this.IsLogin = uid
+			_ = models.Regulate(models.GetTableSys(), "CntUser", 1, "Id=1") // increment the site's user count
 		} else {
 			user := users[0]
 			this.IsLogin = helper.Interface2Int(user["Id"])
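Patch 6 adds pkg/auth/ldap.go, whose IsAuthorized(userName, password, conf) wraps github.com/internet-dev/go-ldap-client and returns the matched user's attributes; the login controller then insists on a "mail" attribute, auto-registers unknown users, and (with patch 7) bumps the CntUser counter. A minimal caller sketch — the host, base DN and bind credentials below are placeholders, while in DocHub they come from the Sys fields LdapBase, LdapHost, LdapPort, LdapBindDN and LdapBindPwd:

package main

import (
	"fmt"

	"DocHub/pkg/auth"
)

func main() {
	// Placeholder values; DocHub loads these from the Sys configuration record.
	conf := auth.LdapConfig{
		Base:    "dc=example,dc=com",
		Host:    "ldap.example.com",
		Port:    389,
		BindDN:  "cn=readonly,dc=example,dc=com",
		BindPwd: "secret",
	}

	// In LDAP mode the login form's "Email" field actually carries the LDAP uid.
	ok, attrs, err := auth.IsAuthorized("jdoe", "correct-password", conf)
	if err != nil || !ok {
		fmt.Println("LDAP authentication failed:", err)
		return
	}
	// "mail" is required: it becomes the DocHub account's email address
	// when the user is auto-registered on first login.
	fmt.Println("authenticated as", attrs["uid"], "mail:", attrs["mail"])
}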