diff --git a/config/config-matterwick.default.json b/config/config-matterwick.default.json
index 4cfc7b0b..dd230794 100644
--- a/config/config-matterwick.default.json
+++ b/config/config-matterwick.default.json
@@ -23,14 +23,8 @@
     "KubeClusterName": "",
     "KubeClusterRegion": "",
     "LogSettings": {
-        "EnableConsole": true,
-        "ConsoleLevel": "DEBUG",
-        "ConsoleJSON": true,
-        "EnableFile": false,
-        "FileLevel": "INFO",
-        "FileJSON": true,
-        "FileFormat": "",
-        "FileLocation": ""
+        "EnableDebug": true,
+        "ConsoleJSON": true
     },
     "CWSPublicAPIAddress": "",
     "CWSInternalAPIAddress": "",
diff --git a/go.mod b/go.mod
index 4afca713..91ff7994 100644
--- a/go.mod
+++ b/go.mod
@@ -8,11 +8,11 @@ require (
 	github.com/google/go-github/v32 v32.1.0
 	github.com/gorilla/mux v1.8.0
 	github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
-	github.com/mattermost/mattermost-cloud v0.69.1-0.20230117143751-957e6fd0e408
-	github.com/mattermost/mattermost-server/v5 v5.26.0
+	github.com/mattermost/mattermost-cloud v0.71.0
+	github.com/mattermost/mattermost-server/v6 v6.7.2
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.9.0
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.2
 	golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783
 	k8s.io/api v0.24.0
 	k8s.io/apimachinery v0.24.0
@@ -29,11 +29,12 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 // indirect
 	github.com/emicklei/go-restful v2.11.2+incompatible // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/francoispqt/gojay v1.2.13 // indirect
-	github.com/go-logr/logr v1.2.2 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.15 // indirect
@@ -45,29 +46,34 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/graph-gophers/graphql-go v1.3.0 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/kr/pretty v0.3.0 // indirect
+	github.com/klauspost/compress v1.15.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
 	github.com/mattermost/ldap v3.0.4+incompatible // indirect
-	github.com/mattermost/logr v1.0.5 // indirect
-	github.com/mattermost/mattermost-operator v1.19.0-rc.2 // indirect
+	github.com/mattermost/logr/v2 v2.0.15 // indirect
+	github.com/mattermost/mattermost-operator v1.20.1 // indirect
 	github.com/mattermost/rotator v0.2.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/minio/md5-simd v1.1.2 // indirect
+	github.com/minio/minio-go/v7 v7.0.24 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/onsi/gomega v1.18.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/philhofer/fwd v1.1.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.55.1 // indirect
 	github.com/prometheus-operator/prometheus-operator/pkg/client v0.55.1 // indirect
@@ -75,15 +81,14 @@ require (
 	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
-	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/rs/xid v1.4.0 // indirect
 	github.com/slok/sloth v0.10.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/wiggin77/cfg v1.0.2 // indirect
+	github.com/tinylib/msgp v1.1.6 // indirect
+	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 	github.com/wiggin77/merror v1.0.3 // indirect
 	github.com/wiggin77/srslog v1.0.1 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.19.1 // indirect
 	golang.org/x/crypto v0.1.0 // indirect
 	golang.org/x/net v0.1.0 // indirect
 	golang.org/x/sys v0.1.0 // indirect
@@ -93,8 +98,8 @@ require (
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index f501b302..ad2a5eba 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,8 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
-cloud.google.com/go v0.37.1/go.mod h1:SAbnLi6YTSPKSI0dTUEOVLCkyPfKXK8n4ibqiMoj4ok=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
@@ -21,6 +21,10 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.88.0/go.mod h1:dnKwfYbP9hQhefiUvpbcAyoGSHUrOxR20JVElLiUvEY=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -34,123 +38,214 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/spanner v1.24.0/go.mod h1:EZI0yH1D/PrXK0XH9Ba5LGXTXWeqZv0ClOD/19a0Z58=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw=
+code.sajari.com/docconv v1.2.0/go.mod h1:r8yfCP6OKbZ9Xkd87aBa4nfpk6ud/PoyLwex3n6cXSc=
 dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-sdk-for-go v26.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
-github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
+github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
+github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
+github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
+github.com/JalfResi/justext v0.0.0-20170829062021-c0282dea7198/go.mod h1:0SURuH1rsE8aVWvutuMZghRNrNrYEUzibzJfhEYR8L0=
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
-github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
-github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/squirrel v1.4.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA=
-github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/squirrel v1.5.2/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
-github.com/PaulARoy/azurestoragecache v0.0.0-20170906084534-3c249a3ba788/go.mod h1:lY1dZd8HBzJ10eqKERHn3CU59tfhzcAVb2c0ZhIWSOk=
+github.com/PuerkitoBio/goquery v1.4.1/go.mod h1:T9ezsOHcCrDCgA8aF1Cqr3sSYbO/xgdy8/R/XiIMAhA=
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
+github.com/PuerkitoBio/goquery v1.8.0/go.mod h1:ypIiRMtY7COPGk+I/YbZLbxsxn9g5ejnI2HSMtkjZvI=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
-github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
+github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA=
+github.com/advancedlogic/GoOse v0.0.0-20191112112754-e742535969c1/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w=
+github.com/advancedlogic/GoOse v0.0.0-20210820140952-9d5822d4a625/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY=
+github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
+github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
+github.com/araddon/dateparse v0.0.0-20180729174819-cfd92a431d0e/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI=
+github.com/araddon/dateparse v0.0.0-20200409225146-d820a6159ab1/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI=
+github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8=
-github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.36.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go v1.44.145 h1:KMVRrIyjBsNz3xGPuHIRnhIuKlb5h3Ii5e5jbi3cgnc=
 github.com/aws/aws-sdk-go v1.44.145/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
+github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
 github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
 github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blevesearch/bleve v1.0.9/go.mod h1:tb04/rbU29clbtNgorgFd8XdJea4x3ybYaOjWKr+UBU=
-github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
+github.com/blevesearch/bleve/v2 v2.3.2/go.mod h1:96+xE5pZUOsr3Y4vHzV1cBC837xZCpwLlX0hrrxnvIg=
+github.com/blevesearch/bleve_index_api v1.0.1/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
+github.com/blevesearch/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A=
 github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
+github.com/blevesearch/goleveldb v1.0.1/go.mod h1:WrU8ltZbIp0wAoig/MHbrPCXSOLpe79nz5lv5nqfYrQ=
+github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
 github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
+github.com/blevesearch/mmap-go v1.0.3/go.mod h1:pYvKl/grLQrBxuaRYgoTssa4rVujYYeenDp++2E+yvs=
+github.com/blevesearch/scorch_segment_api/v2 v2.1.0/go.mod h1:uch7xyyO/Alxkuxa+CGs79vw0QY8BENSBjg6Mw5L5DE=
 github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
+github.com/blevesearch/snowball v0.6.1/go.mod h1:ZF0IBg5vgpeoUhnMza2v0A/z8m1cWPlwhke08LpNusg=
 github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
-github.com/blevesearch/zap/v11 v11.0.9/go.mod h1:47hzinvmY2EvvJruzsSCJpro7so8L1neseaGjrtXHOY=
-github.com/blevesearch/zap/v12 v12.0.9/go.mod h1:paQuvxy7yXor+0Mx8p2KNmJgygQbQNN+W6HRfL5Hvwc=
-github.com/blevesearch/zap/v13 v13.0.1/go.mod h1:XmyNLMvMf8Z5FjLANXwUeDW3e1+o77TTGUWrth7T9WI=
-github.com/blevesearch/zap/v14 v14.0.0/go.mod h1:sUc/gPGJlFbSQ2ZUh/wGRYwkKx+Dg/5p+dd+eq6QMXk=
+github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q=
+github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE=
+github.com/blevesearch/zapx/v11 v11.3.3/go.mod h1:YzTfUm4kS3e8OmTXDHVV8OzC5MWPO/VPJZQgPNVb4Lc=
+github.com/blevesearch/zapx/v12 v12.3.3/go.mod h1:RMl6lOZqF+sTxKvhQDJ5yK2LT3Mu7E2p/jGdjAaiRxs=
+github.com/blevesearch/zapx/v13 v13.3.3/go.mod h1:eppobNM35U4C22yDvTuxV9xPqo10pwfP/jugL4INWG4=
+github.com/blevesearch/zapx/v14 v14.3.3/go.mod h1:zXNcVzukh0AvG57oUtT1T0ndi09H0kELNaNmekEy0jw=
+github.com/blevesearch/zapx/v15 v15.3.3/go.mod h1:C+f/97ZzTzK6vt/7sVlZdzZxKu+5+j4SrGCvr9dJzaY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
 github.com/braintree/manners v0.0.0-20160418043613-82a8879fc5fd h1:ePesaBzdTmoMQjwqRCLP2jY+jjWMBpwws/LEQdt1fMM=
 github.com/braintree/manners v0.0.0-20160418043613-82a8879fc5fd/go.mod h1:TNehV1AhBwtT7Bd+rh8G6MoGDbBLNs/sKdk3nvr4Yzg=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -158,76 +253,192 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
 github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k=
-github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
-github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4=
+github.com/couchbase/moss v0.2.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3/go.mod h1:hEfFauPHz7+NnjR/yHJGhrKo1Za+zStgwUETx3yzqgY=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/die-net/lrucache v0.0.0-20181227122439-19a39ef22a11/go.mod h1:ew0MSjCVDdtGMjF3kzLK9hwdgF5mOE8SbYVF3Rc7mkU=
-github.com/disintegration/imaging v1.6.0/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
+github.com/dhui/dktest v0.3.7/go.mod h1:nYMOkafiA07WchSwKnKFUSbGMb2hMm5DrCGiXYG6gwM=
 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4=
 github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 h1:AQLr//nh20BzN3hIWj2+/Gt3FwSs8Nwo/nz4hMIcLPg=
 github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -242,57 +453,69 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
-github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
-github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
-github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
 github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/getsentry/sentry-go v0.6.1/go.mod h1:0yZBuzSvbZwBnvaF9VwZIMen3kXscY8/uasKtAX1qG8=
+github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
-github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
+github.com/gigawattio/window v0.0.0-20180317192513-0f5467e35573/go.mod h1:eBvb3i++NHDH4Ugo9qCvMw8t0mTSctaEa5blJbWcNxs=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-asn1-ber/asn1-ber v1.5.3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
 github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gorp/gorp v2.2.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
 github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -302,8 +525,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
@@ -325,8 +548,18 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-resty/resty/v2 v2.0.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
+github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU=
+github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
@@ -342,20 +575,58 @@ github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoM
 github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
 github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
 github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
 github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=
 github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-migrate/migrate/v4 v4.15.1/go.mod h1:/CrBenUbcDqsW29jGTR/XFqCfVi/Y6mHXlooCcSOJMQ=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -375,7 +646,9 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -394,8 +667,12 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
 github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@@ -420,6 +697,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
 github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -433,10 +711,12 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
 github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
+github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
@@ -447,6 +727,7 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -459,10 +740,14 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
@@ -471,58 +756,62 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gopherjs/gopherjs v0.0.0-20220221023154-0b2280d3ff96/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
 github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/schema v1.1.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
+github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/graph-gophers/dataloader/v6 v6.0.0/go.mod h1:J15OZSnOoZgMkijpbZcwCmglIDYqlUiTEE1xLPbyqZM=
+github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
+github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE=
-github.com/hako/durafmt v0.0.0-20200605151348-3a43fc422dd9/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
 github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -540,17 +829,18 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
 github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 h1:6ZR6HQ+P9ZUwHlYq+bU7e9wqAImxKUguq8fp2gZSgCo=
 github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5/go.mod h1:Yho0S7KhsnHQRCC5lDraYF1SsLMeWtf/tKdufKu3TJA=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -558,21 +848,74 @@ github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/C
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
 github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
-github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
+github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
+github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
 github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
-github.com/jamiealquiza/envy v1.1.0/go.mod h1:MP36BriGCLwEHhi1OU8E9569JNZrjWfCvzG7RsPnHus=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jaytaylor/html2text v0.0.0-20180606194806-57d518f124b0/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
 github.com/jaytaylor/html2text v0.0.0-20200412013138-3577fbdbcff7/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
+github.com/jaytaylor/html2text v0.0.0-20211105163654-bc68cce691ba/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
 github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
 github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
 github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.2.3/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -587,16 +930,21 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
-github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
-github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
-github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
+github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
+github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
+github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
+github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
+github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -605,14 +953,29 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.3.0/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
-github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
+github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
+github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -624,15 +987,25 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
+github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
+github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/levigross/exp-html v0.0.0-20120902181939-8df60c69a8f5/go.mod h1:QMe2wuKJ0o7zIVE8AqiT8rd8epmm6WDIZ2wyuBqYPzM=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -644,65 +1017,88 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
-github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
 github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8=
 github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
-github.com/mattermost/gorp v2.0.1-0.20200527092429-d62b7b9cadfc+incompatible/go.mod h1:0kX1qa3DOpaPJyOdMLeo7TcBN0QmUszj9a/VygOhDe0=
-github.com/mattermost/gosaml2 v0.3.2/go.mod h1:Z429EIOiEi9kbq6yHoApfzlcXpa6dzRDc6pO+Vy2Ksk=
-github.com/mattermost/ldap v0.0.0-20191128190019-9f62ba4b8d4d/go.mod h1:HLbgMEI5K131jpxGazJ97AxfPDt31osq36YS1oxFQPQ=
+github.com/mattermost/gosaml2 v0.3.3/go.mod h1:Z429EIOiEi9kbq6yHoApfzlcXpa6dzRDc6pO+Vy2Ksk=
+github.com/mattermost/gziphandler v0.0.1/go.mod h1:CvvZR7sXqhj81V2swXuQY7T04Ccc89u7W7pHNPKev8g=
+github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d/go.mod h1:HLbgMEI5K131jpxGazJ97AxfPDt31osq36YS1oxFQPQ=
 github.com/mattermost/ldap v3.0.4+incompatible h1:SOeNnz+JNR+foQ3yHkYqijb9MLPhXN2BZP/PdX23VDU=
 github.com/mattermost/ldap v3.0.4+incompatible/go.mod h1:b4reDCcGpBxJ4WX0f224KFY+OR0npin7or7EFpeIko4=
-github.com/mattermost/logr v1.0.5 h1:TST38xROPguNh8o90BfDHpp1bz6HfTdFYX5Btw/oLwM=
-github.com/mattermost/logr v1.0.5/go.mod h1:YzldchiJXgF789YNDFGXVoCHTQOTrCKwWft9Fwev1iI=
-github.com/mattermost/mattermost-cloud v0.69.1-0.20230117143751-957e6fd0e408 h1:7jGxJLva8oMaEj2R2593ksywmE5gE09Yuio1KvKZih0=
-github.com/mattermost/mattermost-cloud v0.69.1-0.20230117143751-957e6fd0e408/go.mod h1:FOn0DLCO35VYQtfdWiNmljNkZECOCrVcnm4j5RnYNXE=
-github.com/mattermost/mattermost-operator v1.19.0-rc.2 h1:CcCAB02qz4u47yHXIJm3glLDfKo63RFjtGNaJ+31BEA=
-github.com/mattermost/mattermost-operator v1.19.0-rc.2/go.mod h1:427nFmeCyiwJ9N0J7hpYJNYE6a3DjPdwl0Vpel3JHVg=
-github.com/mattermost/mattermost-server/v5 v5.26.0 h1:pDZcD3J6pK3Hb01lNH7Fn3657NXB3Wnm4AK1b+6YgMg=
-github.com/mattermost/mattermost-server/v5 v5.26.0/go.mod h1:TVLwNQLSPNIkFOLoGHCGjZbSc2JEQf5PHUbQvneUSGM=
+github.com/mattermost/logr/v2 v2.0.15 h1:+WNbGcsc3dBao65eXlceB6dTILNJRIrvubnsTl3zBew=
+github.com/mattermost/logr/v2 v2.0.15/go.mod h1:mpPp935r5dIkFDo2y9Q87cQWhFR/4xXpNh0k/y8Hmwg=
+github.com/mattermost/mattermost-cloud v0.71.0 h1:yprVMa0ZAUa1SGGCaU75d2+nEe0mQWKE/D88zHiLjsQ=
+github.com/mattermost/mattermost-cloud v0.71.0/go.mod h1:BkIikzN6fkpQK4HgscfBcg6iQX4LURZtZLqhvgc3Lr4=
+github.com/mattermost/mattermost-operator v1.20.1 h1:ctaUI7yMLKyqDGvWfzSmNWlx8EG1yVMfduzBkA8HmxE=
+github.com/mattermost/mattermost-operator v1.20.1/go.mod h1:WGxmW6iF1+cFc3sJ5uZTVAHolIMjt2/zHumeI7Xt7Hk=
+github.com/mattermost/mattermost-server/v6 v6.7.2 h1:rRss2/R5LNbyc/P1OA4kSWuVq+rmnxwepuwGpTwL+U4=
+github.com/mattermost/mattermost-server/v6 v6.7.2/go.mod h1:b/iDf7Jn2Pd2jWGzaznoVNT811JZpemdmNGP7M/a7Ao=
+github.com/mattermost/morph v0.0.0-20220401091636-39f834798da8/go.mod h1:jxM3g1bx+k2Thz7jofcHguBS8TZn5Pc+o5MGmORObhw=
 github.com/mattermost/rotator v0.2.0 h1:R3dlMHZjGR7t5T2bk76heDwpypl9rd2sZj4GoAZyuWU=
 github.com/mattermost/rotator v0.2.0/go.mod h1:1oxWiEhVdZRckZ0uHGd5Zqf04yynm4U/YGHUPsf4sQ8=
 github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs=
-github.com/mattermost/viper v1.0.4/go.mod h1:uc5hKG9lv4/KRwPOt2c1omOyirS/UnuA2TytiZQSFHM=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
-github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
+github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
+github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
 github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
+github.com/microcosm-cc/bluemonday v1.0.18/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
-github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.24 h1:HPlHiET6L5gIgrHRaw1xFo1OaN4bEP/082asWh3WJtI=
+github.com/minio/minio-go/v7 v7.0.24/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg=
 github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
@@ -714,12 +1110,18 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
 github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -727,29 +1129,34 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
 github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
-github.com/muesli/smartcrop v0.2.1-0.20181030220600-548bbf0c0965/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI=
-github.com/muesli/smartcrop v0.3.0/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= -github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= -github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -757,19 +1164,26 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olivere/elastic v6.2.33+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -777,36 +1191,76 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/oov/psd v0.0.0-20220121172623-5db5eafcecbb/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/gosseract/v2 v2.2.4/go.mod h1:ahOp/kHojnOMGv1RaUnR0jwY5JVa6BYKhYAS8nbMLSo= +github.com/otiai10/gosseract/v2 v2.3.1/go.mod h1:2ZOGgdTIXQzCS5f+N1HkcXRgDX6K3ZoYe3Yvo++cpp4= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -814,86 +1268,111 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= -github.com/poy/onpar v1.0.0/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.55.1 h1:IIEF5Sp5jDnqRNoHH5fPLNOsScMhmfyWmFP7m04jokc= 
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.55.1/go.mod h1:/xf16Bu3krDP6G5WhrJL9avDnLW/AN0g7hAIK63mbes= github.com/prometheus-operator/prometheus-operator/pkg/client v0.55.1 h1:KHtqVbIoIwDSSd0P0k5JI82ied1Ty3yXsjmEsK2CRRM= github.com/prometheus-operator/prometheus-operator/pkg/client v0.55.1/go.mod h1:DkbRtYt185UvoEWqsp6k5zjfTvOvh+Pj/qoh53Qr1v4= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/richardlehane/mscfb v1.0.3/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk= +github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk= +github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rudderlabs/analytics-go v3.2.1+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30= -github.com/russellhaering/goxmldsig v0.0.0-20180430223755-7acd5e4a6ef7/go.mod h1:Oz4y6ImuOQZxynhbSXk7btjEfNBtGlj2dcaOvXl2FSM= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rudderlabs/analytics-go v3.3.2+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30= +github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sergi/go-diff 
v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= @@ -903,6 +1382,7 @@ github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9A github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= @@ -912,11 +1392,16 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/simplereach/timeutils v1.2.0/go.mod h1:VVbQDfN/FHRZa1LSqcwo4kNZ62OOyqLLGQKYB3pB0Q8= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -925,9 +1410,9 @@ github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/slok/sloth v0.10.0 h1:xryA6dH53nNnn2qzG4+T9eiDYqYjO7X7SJDqdK4NvqA= github.com/slok/sloth v0.10.0/go.mod h1:+oEr4aUpDPIYVNzKAovCwrD+l3QszV44C1k+Hcs13KY= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= @@ -937,23 +1422,24 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.1/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= 
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -963,14 +1449,20 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM= +github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ= +github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= -github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -979,73 +1471,105 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/throttled/throttled v2.2.4+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II= -github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9/go.mod 
h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A= -github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= github.com/wiggin77/merror v1.0.3 h1:8+ZHV+aSnJoYghE3EUThl15C6rvF2TYRSvOSBjdmNR8= github.com/wiggin77/merror v1.0.3/go.mod h1:H2ETSu7/bPE0Ymf4bEwdUoo73OOEkdClnoRisfw0Nm0= github.com/wiggin77/srslog v1.0.1 h1:gA2XjSMy3DrRdX9UqLuDtuVAAshb8bE1NhX1YK0Qe+8= github.com/wiggin77/srslog v1.0.1/go.mod h1:fehkyYDq1QfuYn60TDPu9YdY2bB85VUW2mvN1WynEls= -github.com/willf/bitset v1.1.10/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +gitlab.com/nyarla/go-crypt 
v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -1053,9 +1577,9 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1066,6 +1590,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= @@ -1079,69 +1604,83 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1161,26 +1700,31 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1190,6 +1734,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1197,6 +1742,7 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1209,34 +1755,49 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211013171255-e13a2654a71e/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1248,6 +1809,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= @@ -1258,6 +1820,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1268,6 +1831,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1278,25 +1842,34 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1304,15 +1877,21 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1323,34 +1902,59 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1380,6 +1984,7 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1388,17 +1993,18 @@ golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazT golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1407,20 +2013,27 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1431,7 +2044,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1444,7 +2056,6 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1452,31 +2063,44 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1499,6 +2123,11 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1509,18 +2138,18 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190321212433-e79c0c59cdb5/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1529,6 +2158,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1544,7 +2174,6 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1552,27 +2181,41 @@ google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210721163202-f1cecdd8b78a/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210726143408-b02e89920bf0/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20211013025323-ce878158c4d4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1589,8 +2232,13 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1604,6 +2252,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -1612,6 +2261,7 @@ gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1621,21 +2271,26 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/olivere/elastic.v6 v6.2.33/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= +gopkg.in/olivere/elastic.v6 v6.2.37/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1644,21 +2299,25 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= +gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1668,6 +2327,9 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.9/go.mod h1:9u/h6sUh6FxfErv7QqetX1EB3yBMIYOBXzdcf0Gf0rc= k8s.io/api v0.18.15/go.mod h1:+9EU+DfuXrsXZpmECeLEs+kFTO1ju0tukfSLiD//3C4= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg= @@ -1676,14 +2338,23 @@ k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3 k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.18.9/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk= k8s.io/apimachinery v0.18.15/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.23.0/go.mod 
h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apiserver v0.18.9/go.mod h1:vXQzMtUCLsGg1Bh+7Jo2mZKHpHZFCZn8eTNSepcIA1M= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/client-go v0.18.9/go.mod h1:UjkEetDmr40P9NX0Ok3Idt08FCf2I4mIHgjFsot77uY= k8s.io/client-go v0.18.15/go.mod h1:bXuohLq261L1NRWfTA0fjGyTixc01Rf1+yd+ie1ImUY= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= @@ -1692,8 +2363,15 @@ k8s.io/code-generator v0.18.9/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8 k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.18.9/go.mod h1:tUo4qZtV8m7t/U+0DgY+fcnn4BFZ480fZdzxOkWH4zk= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1704,6 +2382,7 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= @@ -1711,10 +2390,12 @@ k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.18.9 h1:kqwbA15uygYfLfdMUlyBm/q3OHaYbnirFrg7tGUTVZk= k8s.io/kube-aggregator v0.18.9/go.mod h1:ik5Mf6JaP2M9XbWZR/AYgXx2Nj4EDBrHyakUx7C8cdw= 
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/sample-controller v0.22.1/go.mod h1:184Fa29md4PuQSEozdEw6n+AAmoodWOy9iCtyfCvAWY= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1723,13 +2404,144 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.33.9/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.33.11/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.34.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.4/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.5/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.7/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.8/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= +modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= +modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= +modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI= +modernc.org/ccgo/v3 v3.11.1/go.mod h1:lWHxfsn13L3f7hgGsGlU28D9eUOf6y3ZYHKoPaKU0ag= +modernc.org/ccgo/v3 v3.11.3/go.mod h1:0oHunRBMBiXOKdaglfMlRPBALQqsfrCKXgw9okQ3GEw= +modernc.org/ccgo/v3 v3.12.4/go.mod h1:Bk+m6m2tsooJchP/Yk5ji56cClmN6R1cqc9o/YtbgBQ= +modernc.org/ccgo/v3 v3.12.6/go.mod h1:0Ji3ruvpFPpz+yu+1m0wk68pdr/LENABhTrDkMDWH6c= +modernc.org/ccgo/v3 v3.12.8/go.mod 
h1:Hq9keM4ZfjCDuDXxaHptpv9N24JhgBZmUG5q60iLgUo= +modernc.org/ccgo/v3 v3.12.11/go.mod h1:0jVcmyDwDKDGWbcrzQ+xwJjbhZruHtouiBEvDfoIsdg= +modernc.org/ccgo/v3 v3.12.14/go.mod h1:GhTu1k0YCpJSuWwtRAEHAol5W7g1/RRfS4/9hc9vF5I= +modernc.org/ccgo/v3 v3.12.18/go.mod h1:jvg/xVdWWmZACSgOiAhpWpwHWylbJaSzayCqNOJKIhs= +modernc.org/ccgo/v3 v3.12.20/go.mod h1:aKEdssiu7gVgSy/jjMastnv/q6wWGRbszbheXgWRHc8= +modernc.org/ccgo/v3 v3.12.21/go.mod h1:ydgg2tEprnyMn159ZO/N4pLBqpL7NOkJ88GT5zNU2dE= +modernc.org/ccgo/v3 v3.12.22/go.mod h1:nyDVFMmMWhMsgQw+5JH6B6o4MnZ+UQNw1pp52XYFPRk= +modernc.org/ccgo/v3 v3.12.25/go.mod h1:UaLyWI26TwyIT4+ZFNjkyTbsPsY3plAEB6E7L/vZV3w= +modernc.org/ccgo/v3 v3.12.29/go.mod h1:FXVjG7YLf9FetsS2OOYcwNhcdOLGt8S9bQ48+OP75cE= +modernc.org/ccgo/v3 v3.12.36/go.mod h1:uP3/Fiezp/Ga8onfvMLpREq+KUjUmYMxXPO8tETHtA8= +modernc.org/ccgo/v3 v3.12.38/go.mod h1:93O0G7baRST1vNj4wnZ49b1kLxt0xCW5Hsa2qRaZPqc= +modernc.org/ccgo/v3 v3.12.43/go.mod h1:k+DqGXd3o7W+inNujK15S5ZYuPoWYLpF5PYougCmthU= +modernc.org/ccgo/v3 v3.12.46/go.mod h1:UZe6EvMSqOxaJ4sznY7b23/k13R8XNlyWsO5bAmSgOE= +modernc.org/ccgo/v3 v3.12.47/go.mod h1:m8d6p0zNps187fhBwzY/ii6gxfjob1VxWb919Nk1HUk= +modernc.org/ccgo/v3 v3.12.50/go.mod h1:bu9YIwtg+HXQxBhsRDE+cJjQRuINuT9PUK4orOco/JI= +modernc.org/ccgo/v3 v3.12.51/go.mod h1:gaIIlx4YpmGO2bLye04/yeblmvWEmE4BBBls4aJXFiE= +modernc.org/ccgo/v3 v3.12.53/go.mod h1:8xWGGTFkdFEWBEsUmi+DBjwu/WLy3SSOrqEmKUjMeEg= +modernc.org/ccgo/v3 v3.12.54/go.mod h1:yANKFTm9llTFVX1FqNKHE0aMcQb1fuPJx6p8AcUx+74= +modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7IeU= +modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU= +modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc= +modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM= +modernc.org/ccgo/v3 v3.12.66/go.mod h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ= +modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84= +modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ= +modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY= +modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w= +modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU= +modernc.org/ccgo/v3 v3.12.88/go.mod h1:0MFzUHIuSIthpVZyMWiFYMwjiFnhrN5MkvBrUwON+ZM= +modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko= +modernc.org/ccgo/v3 v3.12.92/go.mod h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA= +modernc.org/ccgo/v3 v3.12.95/go.mod h1:ZcLyvtocXYi8uF+9Ebm3G8EF8HNY5hGomBqthDp4eC8= +modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc 
v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q= +modernc.org/libc v1.11.0/go.mod h1:2lOfPmj7cz+g1MrPNmX65QCzVxgNq2C5o0jdLY2gAYg= +modernc.org/libc v1.11.2/go.mod h1:ioIyrl3ETkugDO3SGZ+6EOKvlP3zSOycUETe4XM4n8M= +modernc.org/libc v1.11.5/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU= +modernc.org/libc v1.11.6/go.mod h1:ddqmzR6p5i4jIGK1d/EiSw97LBcE3dK24QEwCFvgNgE= +modernc.org/libc v1.11.11/go.mod h1:lXEp9QOOk4qAYOtL3BmMve99S5Owz7Qyowzvg6LiZso= +modernc.org/libc v1.11.13/go.mod h1:ZYawJWlXIzXy2Pzghaf7YfM8OKacP3eZQI81PDLFdY8= +modernc.org/libc v1.11.16/go.mod h1:+DJquzYi+DMRUtWI1YNxrlQO6TcA5+dRRiq8HWBWRC8= +modernc.org/libc v1.11.19/go.mod h1:e0dgEame6mkydy19KKaVPBeEnyJB4LGNb0bBH1EtQ3I= +modernc.org/libc v1.11.24/go.mod h1:FOSzE0UwookyT1TtCJrRkvsOrX2k38HoInhw+cSCUGk= +modernc.org/libc v1.11.26/go.mod h1:SFjnYi9OSd2W7f4ct622o/PAYqk7KHv6GS8NZULIjKY= +modernc.org/libc v1.11.27/go.mod h1:zmWm6kcFXt/jpzeCgfvUNswM0qke8qVwxqZrnddlDiE= +modernc.org/libc v1.11.28/go.mod h1:Ii4V0fTFcbq3qrv3CNn+OGHAvzqMBvC7dBNyC4vHZlg= +modernc.org/libc v1.11.31/go.mod h1:FpBncUkEAtopRNJj8aRo29qUiyx5AvAlAxzlx9GNaVM= +modernc.org/libc v1.11.34/go.mod h1:+Tzc4hnb1iaX/SKAutJmfzES6awxfU1BPvrrJO0pYLg= +modernc.org/libc v1.11.37/go.mod h1:dCQebOwoO1046yTrfUE5nX1f3YpGZQKNcITUYWlrAWo= +modernc.org/libc v1.11.39/go.mod h1:mV8lJMo2S5A31uD0k1cMu7vrJbSA3J3waQJxpV4iqx8= +modernc.org/libc v1.11.42/go.mod h1:yzrLDU+sSjLE+D4bIhS7q1L5UwXDOw99PLSX0BlZvSQ= +modernc.org/libc v1.11.44/go.mod h1:KFq33jsma7F5WXiYelU8quMJasCCTnHK0mkri4yPHgA= +modernc.org/libc v1.11.45/go.mod h1:Y192orvfVQQYFzCNsn+Xt0Hxt4DiO4USpLNXBlXg/tM= +modernc.org/libc v1.11.47/go.mod h1:tPkE4PzCTW27E6AIKIR5IwHAQKCAtudEIeAV1/SiyBg= +modernc.org/libc v1.11.49/go.mod h1:9JrJuK5WTtoTWIFQ7QjX2Mb/bagYdZdscI3xrvHbXjE= +modernc.org/libc v1.11.51/go.mod h1:R9I8u9TS+meaWLdbfQhq2kFknTW0O3aw3kEMqDDxMaM= +modernc.org/libc v1.11.53/go.mod h1:5ip5vWYPAoMulkQ5XlSJTy12Sz5U6blOQiYasilVPsU= +modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw= +modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M= +modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18= +modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8= +modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= +modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0= +modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI= +modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE= +modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY= +modernc.org/libc v1.11.88/go.mod h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ= +modernc.org/libc v1.11.90/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c= +modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c= +modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI= +modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI= +modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.0/go.mod 
h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= +modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= +modernc.org/tcl v1.9.2/go.mod h1:aw7OnlIoiuJgu1gwbTZtrKnGpDqH9wyH++jZcxdqNsg= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.2.20/go.mod h1:zU9FiF4PbHdOTUxw+IF8j7ArBMRPsHgq10uVPt6xTzo= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= sigs.k8s.io/aws-iam-authenticator v0.6.3 h1:4AuGwSqbV+TXJHlqhCZKk3k3heQwnKj5Er+z8JB79n4= sigs.k8s.io/aws-iam-authenticator v0.6.3/go.mod h1:1cl1kCN0UQX7XEMJ33E0qJqBtLXz04XT92x4h0shNus= @@ -1741,6 +2553,7 @@ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod 
h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= @@ -1750,5 +2563,3 @@ sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -willnorris.com/go/gifresize v1.0.0/go.mod h1:eBM8gogBGCcaH603vxSpnfjwXIpq6nmnj/jauBDKtAk= -willnorris.com/go/imageproxy v0.10.0/go.mod h1:2tWdKRneln3E9X/zwH1RINpQAQWPeUiNynZ7UQ9OROk= diff --git a/main.go b/main.go index 50a91524..c0c75e75 100644 --- a/main.go +++ b/main.go @@ -9,7 +9,6 @@ import ( "os" "os/signal" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/mattermost/matterwick/server" "github.com/pkg/errors" ) @@ -24,9 +23,6 @@ func main() { fmt.Println(errors.Wrap(err, "unable to load server config")) os.Exit(1) } - server.SetupLogging(config) - - mlog.Info("Loaded config", mlog.String("filename", configFile)) s := server.New(config) diff --git a/server/builds.go b/server/builds.go index 822949a1..13d64304 100644 --- a/server/builds.go +++ b/server/builds.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/mattermost/matterwick/model" + "github.com/sirupsen/logrus" "github.com/heroku/docker-registry-client/registry" "github.com/pkg/errors" @@ -19,7 +19,7 @@ type Builds struct{} type buildsInterface interface { getInstallationVersion(pr *model.PullRequest) string dockerRegistryClient(s *Server) (*registry.Registry, error) - waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string) (*model.PullRequest, error) + waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string, logger logrus.FieldLogger) (*model.PullRequest, error) } func (b *Builds) getInstallationVersion(pr *model.PullRequest) string { @@ -39,25 +39,26 @@ func (b *Builds) dockerRegistryClient(s *Server) (reg *registry.Registry, err er return reg, nil } -func (b *Builds) waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string) (*model.PullRequest, error) { +func (b *Builds) waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string, logger logrus.FieldLogger) (*model.PullRequest, error) { + desiredTag := b.getInstallationVersion(pr) + logger = logger.WithFields(logrus.Fields{"image": imageToCheck, "tag": desiredTag}) + for { select { case <-ctx.Done(): return pr, errors.New("timed out waiting for image to publish") case <-time.After(30 * time.Second): - desiredTag := b.getInstallationVersion(pr) - _, err := reg.ManifestDigest(imageToCheck, desiredTag) if err != nil && !strings.Contains(err.Error(), "status=404") { return pr, errors.Wrap(err, "unable to fetch tag from docker registry") } if err == nil { - mlog.Info("docker tag found, image was uploaded", mlog.String("image", imageToCheck), mlog.String("tag", desiredTag)) + logger.Info("Docker tag found!") return pr, nil } - mlog.Info("docker tag for the build not found. waiting a bit more...", mlog.String("image", imageToCheck), mlog.String("tag", desiredTag), mlog.String("repo", pr.RepoName), mlog.Int("number", pr.Number)) + logger.Debug("Docker tag for the build not found. 
Waiting...") } } } diff --git a/server/builds_mocked.go b/server/builds_mocked.go index 573ea4df..ae7883fe 100644 --- a/server/builds_mocked.go +++ b/server/builds_mocked.go @@ -4,6 +4,7 @@ import ( "context" "github.com/mattermost/matterwick/model" + "github.com/sirupsen/logrus" "github.com/heroku/docker-registry-client/registry" ) @@ -22,6 +23,6 @@ func (b *MockedBuilds) dockerRegistryClient(s *Server) (*registry.Registry, erro return nil, nil } -func (b *MockedBuilds) waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string) (*model.PullRequest, error) { +func (b *MockedBuilds) waitForImage(ctx context.Context, s *Server, reg *registry.Registry, pr *model.PullRequest, imageToCheck string, logger logrus.FieldLogger) (*model.PullRequest, error) { return pr, nil } diff --git a/server/config.go b/server/config.go index 92e3821d..61b7630e 100644 --- a/server/config.go +++ b/server/config.go @@ -75,13 +75,8 @@ type MatterwickConfig struct { KubeClusterRegion string LogSettings struct { - EnableConsole bool - ConsoleJSON bool - ConsoleLevel string - EnableFile bool - FileJSON bool - FileLevel string - FileLocation string + EnableDebug bool + ConsoleJSON bool } CWSPublicAPIAddress string diff --git a/server/github.go b/server/github.go index c514edbb..b62f018c 100644 --- a/server/github.go +++ b/server/github.go @@ -8,47 +8,45 @@ import ( "encoding/json" "io" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/mattermost/matterwick/model" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/google/go-github/v32/github" "golang.org/x/oauth2" ) // PullRequestEventFromJSON parses json to a github.PullRequestEvent -func PullRequestEventFromJSON(data io.Reader) *github.PullRequestEvent { +func PullRequestEventFromJSON(data io.Reader) (*github.PullRequestEvent, error) { decoder := json.NewDecoder(data) var event github.PullRequestEvent if err := decoder.Decode(&event); err != nil { - mlog.Error("error parsing pull request event from JSON", mlog.Err(err)) - return nil + return nil, errors.Wrap(err, "failed to parse pull request event from JSON") } - return &event + return &event, nil } // IssueCommentEventFromJSON parses json to a github.IssueCommentEvent -func IssueCommentEventFromJSON(data io.Reader) *github.IssueCommentEvent { +func IssueCommentEventFromJSON(data io.Reader) (*github.IssueCommentEvent, error) { decoder := json.NewDecoder(data) var event github.IssueCommentEvent if err := decoder.Decode(&event); err != nil { - mlog.Error("error parsing issue comment from JSON", mlog.Err(err)) - return nil + return nil, errors.Wrap(err, "failed to parse issue comment event from JSON") } - return &event + return &event, nil } // PingEventFromJSON parses json to a github.PingEvent -func PingEventFromJSON(data io.Reader) *github.PingEvent { +func PingEventFromJSON(data io.Reader) (*github.PingEvent, error) { decoder := json.NewDecoder(data) var event github.PingEvent if err := decoder.Decode(&event); err != nil { - mlog.Error("error parsing ping event from JSON", mlog.Err(err)) - return nil + return nil, errors.Wrap(err, "failed to parse ping event from JSON") } - return &event + return &event, nil } func newGithubClient(token string) *github.Client { @@ -99,29 +97,29 @@ func labelsToStringArray(labels []*github.Label) []string { } func (s *Server) sendGitHubComment(repoOwner, repoName string, number int, comment string) { - mlog.Debug("Sending GitHub comment", mlog.Int("issue", number), mlog.String("comment", comment)) + 
logger := s.Logger.WithFields(logrus.Fields{"issue": number, "comment": comment}) + logger.Info("Sending GitHub comment") client := newGithubClient(s.Config.GithubAccessToken) _, _, err := client.Issues.CreateComment(context.Background(), repoOwner, repoName, number, &github.IssueComment{Body: &comment}) if err != nil { - mlog.Error("Error commenting", mlog.Err(err)) + logger.WithError(err).Error("Error commenting") } } func (s *Server) removeLabel(repoOwner, repoName string, number int, label string) { - mlog.Info("Removing label on issue", mlog.Int("issue", number), mlog.String("label", label)) + logger := s.Logger.WithFields(logrus.Fields{"issue": number, "label": label}) + logger.Info("Removing label on issue") client := newGithubClient(s.Config.GithubAccessToken) _, err := client.Issues.RemoveLabelForIssue(context.Background(), repoOwner, repoName, number, label) if err != nil { - mlog.Error("Error removing the label", mlog.Err(err)) + logger.WithError(err).Error("Error removing the label") } - mlog.Info("Finished removing the label") } func (s *Server) getComments(repoOwner, repoName string, number int) ([]*github.IssueComment, error) { client := newGithubClient(s.Config.GithubAccessToken) comments, _, err := client.Issues.ListComments(context.Background(), repoOwner, repoName, number, nil) if err != nil { - mlog.Error("pr_error", mlog.Err(err)) return nil, err } return comments, nil @@ -133,7 +131,6 @@ func (s *Server) GetUpdateChecks(owner, repoName string, prNumber int) (*model.P prGitHub, _, err := client.PullRequests.Get(context.Background(), owner, repoName, prNumber) pr, err := s.GetPullRequestFromGithub(prGitHub) if err != nil { - mlog.Error("pr_error", mlog.Err(err)) return nil, err } @@ -145,7 +142,7 @@ func (s *Server) checkUserPermission(user, repoOwner string) bool { _, resp, err := client.Organizations.GetOrgMembership(context.Background(), user, repoOwner) if resp.StatusCode == 404 { - mlog.Info("User is not part of the ORG", mlog.String("User", user)) + s.Logger.Warnf("User %s is not part of the ORG", user) return false } if err != nil { @@ -162,14 +159,16 @@ func (s *Server) checkIfRefExists(pr *model.PullRequest, org string, ref string) return false, err } - if response.StatusCode == 200 { - mlog.Debug("Reference found. ", mlog.Int("pr", pr.Number), mlog.String("ref", ref)) + logger := s.Logger.WithFields(logrus.Fields{"ref": ref, "pr": pr.Number}) + switch response.StatusCode { + case 200: + logger.Debug("Reference found") return true, nil - } else if response.StatusCode == 404 { - mlog.Debug("Unable to find reference. ", mlog.Int("pr", pr.Number), mlog.String("ref", ref)) + case 404: + logger.Debug("Unable to find reference") return false, nil - } else { - mlog.Debug("Unknown response code while trying to check for reference. 
", mlog.Int("pr", pr.Number), mlog.Int("response_code", response.StatusCode), mlog.String("ref", ref)) + default: + logger.Warnf("Unknown response %d code while trying to check for reference.", response.StatusCode) return false, nil } } @@ -188,7 +187,7 @@ func (s *Server) createRef(pr *model.PullRequest, ref string) { }) if err != nil { - mlog.Error("Error creating reference", mlog.Err(err)) + s.Logger.WithError(err).Error("Failed to create reference") } } @@ -212,18 +211,3 @@ func (s *Server) deleteRef(repoOwner string, repoName string, ref string) error } return nil } - -func (s *Server) areChecksSuccessfulForPr(pr *model.PullRequest, org string) (bool, error) { - client := newGithubClient(s.Config.GithubAccessToken) - mlog.Debug("Checking combined status for ref", mlog.Int("prNumber", pr.Number), mlog.String("ref", pr.Ref), mlog.String("prSha", pr.Sha)) - cStatus, _, err := client.Repositories.GetCombinedStatus(context.Background(), org, pr.RepoName, pr.Sha, nil) - if err != nil { - mlog.Err(err) - return false, err - } - mlog.Debug("Retrieved status for pr", mlog.String("status", cStatus.GetState()), mlog.Int("prNumber", pr.Number), mlog.String("prSha", pr.Sha)) - if cStatus.GetState() == "success" || cStatus.GetState() == "" { - return true, nil - } - return false, nil -} diff --git a/server/kubernetes.go b/server/kubernetes.go index 3e730174..225013bf 100644 --- a/server/kubernetes.go +++ b/server/kubernetes.go @@ -11,9 +11,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/eks" "github.com/mattermost/mattermost-cloud/k8s" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/pkg/errors" - logrus "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +67,7 @@ func deleteNamespace(kc *k8s.KubeClient, namespace string) error { return nil } -func waitForIPAssignment(kc *k8s.KubeClient, namespace string) (string, error) { +func waitForIPAssignment(kc *k8s.KubeClient, namespace string, logger logrus.FieldLogger) (string, error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() for { @@ -82,7 +81,7 @@ func waitForIPAssignment(kc *k8s.KubeClient, namespace string) (string, error) { return lb.Status.LoadBalancer.Ingress[0].Hostname, nil } - mlog.Info("No IP found yet.. Waiting..") + logger.Debug("No IP found yet. 
Waiting...") } } } diff --git a/server/limit_rate_gh.go b/server/limit_rate_gh.go index 7440ae21..d904dab2 100644 --- a/server/limit_rate_gh.go +++ b/server/limit_rate_gh.go @@ -7,25 +7,31 @@ import ( "context" "time" - "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/sirupsen/logrus" ) // CheckLimitRateAndSleep checks the api rate and sleep if needed func (s *Server) CheckLimitRateAndSleep() { - mlog.Info("Checking the rate limit on Github and will sleep if need...") + s.Logger.Info("Checking the rate limit on Github and will sleep if need...") client := newGithubClient(s.Config.GithubAccessToken) rate, _, err := client.RateLimits(context.Background()) if err != nil { - mlog.Error("Error getting the rate limit", mlog.Err(err)) + s.Logger.WithError(err).Error("Error getting the rate limit") time.Sleep(30 * time.Second) return } - mlog.Info("Current rate limit", mlog.Int("Remaining Rate", rate.Core.Remaining), mlog.Int("Limit Rate", rate.Core.Limit)) + s.Logger.WithFields(logrus.Fields{ + "Remaining Rate": rate.Core.Remaining, + "Limit Rate": rate.Core.Limit, + }).Info("Current rate limit") if rate.Core.Remaining <= s.Config.GitHubTokenReserve { sleepDuration := time.Until(rate.Core.Reset.Time) + (time.Second * 10) if sleepDuration > 0 { - mlog.Error("--Rate Limiting-- Tokens reached minimum reserve. Sleeping until reset in", mlog.Int("Minimun", s.Config.GitHubTokenReserve), mlog.Any("Sleep time", sleepDuration)) + s.Logger.WithFields(logrus.Fields{ + "Minimum": s.Config.GitHubTokenReserve, + "Sleep time": sleepDuration, + }).Error("--Rate Limiting-- Tokens reached minimum reserve. Sleeping until reset in") time.Sleep(sleepDuration) } } @@ -33,18 +39,21 @@ func (s *Server) CheckLimitRateAndSleep() { // CheckLimitRateAndAbortRequest checks the api rate and abort the request if needed func (s *Server) CheckLimitRateAndAbortRequest() bool { - mlog.Info("Checking the rate limit on Github and will abort request if need...") + s.Logger.Info("Checking the rate limit on Github and will abort request if need...") client := newGithubClient(s.Config.GithubAccessToken) rate, _, err := client.RateLimits(context.Background()) if err != nil { - mlog.Error("Error getting the rate limit", mlog.Err(err)) + s.Logger.WithError(err).Error("Error getting the rate limit") time.Sleep(30 * time.Second) return false } - mlog.Info("Current rate limit", mlog.Int("Remaining Rate", rate.Core.Remaining), mlog.Int("Limit Rate", rate.Core.Limit)) + s.Logger.WithFields(logrus.Fields{ + "Remaining Rate": rate.Core.Remaining, + "Limit Rate": rate.Core.Limit, + }).Info("Current rate limit") if rate.Core.Remaining <= s.Config.GitHubTokenReserve { - mlog.Info("Request will be aborted...") + s.Logger.Error("Request will be aborted...") return true } return false diff --git a/server/logger.go b/server/logger.go new file mode 100644 index 00000000..ff31f926 --- /dev/null +++ b/server/logger.go @@ -0,0 +1,22 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. +// + +package server + +import ( + "os" + + log "github.com/sirupsen/logrus" +) + +var logger *log.Logger + +func init() { + logger = log.New() + logger.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + }) + // Output to stdout instead of the default stderr. 
+ logger.SetOutput(os.Stdout) +} diff --git a/server/pull_request.go b/server/pull_request.go index a1f75957..812438d9 100644 --- a/server/pull_request.go +++ b/server/pull_request.go @@ -7,8 +7,8 @@ import ( "context" "strings" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/mattermost/matterwick/model" + "github.com/sirupsen/logrus" "github.com/google/go-github/v32/github" ) @@ -18,25 +18,27 @@ func (s *Server) handlePullRequestEvent(event *github.PullRequestEvent) { prNumber := event.GetNumber() label := event.GetLabel().GetName() - mlog.Info("PR-Event", mlog.String("repo", repoName), mlog.Int("pr", prNumber), mlog.String("action", event.GetAction())) + logger := s.Logger.WithFields(logrus.Fields{"repo": repoName, "pr": prNumber, "action": event.GetAction()}) + logger.Info("PR-Event") + pr, err := s.GetPullRequestFromGithub(event.PullRequest) if err != nil { - mlog.Error("Unable to get PR from GitHub", mlog.Int("pr", prNumber), mlog.Err(err)) + logger.WithError(err).Error("Unable to get PR from GitHub") return } switch event.GetAction() { case "opened": - mlog.Info("PR opened", mlog.String("repo", repoName), mlog.Int("pr", pr.Number)) + logger.Info("PR opened") case "reopened": - mlog.Info("PR reopened", mlog.String("repo", repoName), mlog.Int("pr", pr.Number)) + logger.Info("PR reopened") case "labeled": if event.Label == nil { - mlog.Error("Label event received, but label object was empty") + logger.Error("Label event received, but label object was empty") return } if s.isSpinWickLabel(label) { - mlog.Info("PR received SpinWick label", mlog.String("repo", repoName), mlog.Int("pr", prNumber), mlog.String("label", label)) + logger.WithField("label", label).Info("PR received SpinWick label") switch *event.Label.Name { case s.Config.SetupSpinWick: s.handleCreateSpinWick(pr, "miniSingleton", false, false) @@ -45,16 +47,16 @@ func (s *Server) handlePullRequestEvent(event *github.PullRequestEvent) { case s.Config.SetupSpinWickWithCWS: s.handleCreateSpinWick(pr, "miniSingleton", true, true) default: - mlog.Error("Failed to determine sizing on SpinWick label", mlog.String("label", label)) + logger.WithField("label", label).Error("Failed to determine sizing on SpinWick label") } } case "unlabeled": if event.Label == nil { - mlog.Error("Unlabel event received, but label object was empty") + logger.Error("Unlabel event received, but label object was empty") return } if s.isSpinWickLabel(label) { - mlog.Info("PR SpinWick label was removed", mlog.String("repo", repoName), mlog.Int("pr", prNumber), mlog.String("label", label)) + logger.WithField("label", label).Info("PR SpinWick label was removed") switch *event.Label.Name { case s.Config.SetupSpinWickWithCWS: s.handleDestroySpinWick(pr, true) @@ -63,9 +65,9 @@ func (s *Server) handlePullRequestEvent(event *github.PullRequestEvent) { } } case "synchronize": - mlog.Info("PR has a new commit", mlog.String("repo", repoName), mlog.Int("pr", prNumber)) + logger.Info("PR has a new commit") if s.isSpinWickLabelInLabels(pr.Labels) { - mlog.Info("PR has a SpinWick label, starting upgrade", mlog.String("repo", repoName), mlog.Int("pr", prNumber)) + logger.Info("PR has a SpinWick label, starting upgrade") if s.isSpinWickHALabel(pr.Labels) { s.handleUpdateSpinWick(pr, true, false) } else if s.isSpinWickCloudWithCWSLabel(pr.Labels) { @@ -75,7 +77,7 @@ func (s *Server) handlePullRequestEvent(event *github.PullRequestEvent) { } } case "closed": - mlog.Info("PR was closed", mlog.String("repo", repoName), mlog.Int("pr", prNumber)) + logger.Info("PR 
was closed") if s.isSpinWickLabelInLabels(pr.Labels) { if s.isSpinWickCloudWithCWSLabel(pr.Labels) { s.handleDestroySpinWick(pr, true) @@ -87,33 +89,7 @@ func (s *Server) handlePullRequestEvent(event *github.PullRequestEvent) { } -func (s *Server) handlePRLabeled(pr *model.PullRequest, addedLabel string) { - mlog.Info("New PR label detected", mlog.Int("pr", pr.Number), mlog.String("label", addedLabel)) - - // Must be sure the comment is created before we let another request test - s.commentLock.Lock() - defer s.commentLock.Unlock() - - comments, _, err := newGithubClient(s.Config.GithubAccessToken).Issues.ListComments(context.Background(), pr.RepoOwner, pr.RepoName, pr.Number, nil) - if err != nil { - mlog.Error("Unable to list comments for PR", mlog.Int("pr", pr.Number), mlog.Err(err)) - return - } - - // Old comment created by MatterWick user for test server deletion will be deleted here - for _, comment := range comments { - if *comment.User.Login == s.Config.Username && - strings.Contains(*comment.Body, s.Config.DestroyedSpinmintMessage) { - mlog.Info("Removing old server deletion comment with ID", mlog.Int64("ID", *comment.ID)) - _, err := newGithubClient(s.Config.GithubAccessToken).Issues.DeleteComment(context.Background(), pr.RepoOwner, pr.RepoName, *comment.ID) - if err != nil { - mlog.Error("Unable to remove old server deletion comment", mlog.Err(err)) - } - } - } -} - -func (s *Server) removeOldComments(comments []*github.IssueComment, pr *model.PullRequest) { +func (s *Server) removeOldComments(comments []*github.IssueComment, pr *model.PullRequest, logger logrus.FieldLogger) { serverMessages := []string{ s.Config.SetupSpinmintFailedMessage, "Spinmint test server created", @@ -137,15 +113,15 @@ func (s *Server) removeOldComments(comments []*github.IssueComment, pr *model.Pu "Mattermost test server with CWS created", } - mlog.Info("Removing old Matterwick comments") + logger.Info("Removing old Matterwick comments") for _, comment := range comments { if *comment.User.Login == s.Config.Username { for _, message := range serverMessages { if strings.Contains(*comment.Body, message) { - mlog.Info("Removing old comment with ID", mlog.Int64("ID", *comment.ID)) + logger.Infof("Removing old comment with ID %d", *comment.ID) _, err := newGithubClient(s.Config.GithubAccessToken).Issues.DeleteComment(context.Background(), pr.RepoOwner, pr.RepoName, *comment.ID) if err != nil { - mlog.Error("Unable to remove old MatterWick comment", mlog.Err(err)) + logger.WithError(err).Error("Unable to remove old MatterWick comment") } break } diff --git a/server/server.go b/server/server.go index b560fa99..4a647659 100644 --- a/server/server.go +++ b/server/server.go @@ -10,18 +10,15 @@ import ( "math/rand" "net/http" "os" - "path/filepath" "strings" "sync" "time" - cloudModel "github.com/mattermost/mattermost-cloud/model" - "github.com/mattermost/mattermost-server/v5/mlog" - "github.com/mattermost/mattermost-server/v5/utils/fileutils" - "github.com/braintree/manners" "github.com/google/go-github/v32/github" "github.com/gorilla/mux" + cloudModel "github.com/mattermost/mattermost-cloud/model" + "github.com/sirupsen/logrus" ) // Server is the MatterWick server. @@ -37,11 +34,11 @@ type Server struct { commentLock sync.Mutex StartTime time.Time + + Logger logrus.FieldLogger } const ( - logFilename = "matterwick.log" - // buildOverride overrides the buildsInterface of the server for development // and testing. 
buildOverride = "MATTERWICK_BUILD_OVERRIDE" @@ -49,31 +46,41 @@ const ( // New returns a new server with the desired configuration func New(config *MatterwickConfig) *Server { + if config.LogSettings.EnableDebug { + logger.SetLevel(logrus.DebugLevel) + } + if config.LogSettings.ConsoleJSON { + logger.SetFormatter(&logrus.JSONFormatter{}) + } + s := &Server{ Config: config, Router: mux.NewRouter(), webhookChannels: make(map[string]chan cloudModel.WebhookPayload), StartTime: time.Now(), + Logger: logger.WithField("instance", cloudModel.NewID()), } if !isAwsConfigDefined() { - mlog.Error("Missing environment credentials for AWS Access: AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID") + s.Logger.Error("Missing environment credentials for AWS Access: AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID") } s.Builds = &Builds{} if os.Getenv(buildOverride) != "" { - mlog.Warn("Using mocked build tools") + s.Logger.Warn("Using mocked build tools") s.Builds = &MockedBuilds{ Version: os.Getenv(buildOverride), } } + s.Logger.Info("Config loaded") + return s } // Start starts a server func (s *Server) Start() { - mlog.Info("Starting MatterWick Server") + s.Logger.Info("Starting MatterWick Server") rand.Seed(time.Now().Unix()) @@ -81,19 +88,18 @@ func (s *Server) Start() { var handler http.Handler = s.Router go func() { - mlog.Info("Listening on", mlog.String("address", s.Config.ListenAddress)) + s.Logger.WithField("addr", s.Config.ListenAddress).Info("API server listening") err := manners.ListenAndServe(s.Config.ListenAddress, handler) if err != nil { s.logErrorToMattermost(err.Error()) - mlog.Critical("server_error", mlog.Err(err)) - panic(err.Error()) + s.Logger.WithError(err).Panic("server_error") } }() } // Stop stops a server func (s *Server) Stop() { - mlog.Info("Stopping MatterWick") + s.Logger.Info("Stopping MatterWick") manners.Close() } @@ -120,14 +126,14 @@ func (s *Server) githubEvent(w http.ResponseWriter, r *http.Request) { receivedHash := strings.SplitN(r.Header.Get("X-Hub-Signature"), "=", 2) if receivedHash[0] != "sha1" { - mlog.Error("Invalid webhook hash signature: SHA1") + s.Logger.Error("Invalid webhook hash signature: SHA1") w.WriteHeader(http.StatusForbidden) return } err := ValidateSignature(receivedHash, buf, s.Config.GitHubWebhookSecret) if err != nil { - mlog.Error(err.Error()) + s.Logger.Error(err.Error()) w.WriteHeader(http.StatusForbidden) return } @@ -135,21 +141,32 @@ func (s *Server) githubEvent(w http.ResponseWriter, r *http.Request) { eventType := r.Header.Get("X-GitHub-Event") switch eventType { case "ping": - pingEvent := PingEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) - if pingEvent == nil { - mlog.Info("ping event failed") + pingEvent, err := PingEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) + if err != nil { + s.Logger.WithError(err).Error("Failed to parse ping event") w.WriteHeader(http.StatusBadRequest) return } - mlog.Info("ping event", mlog.Int64("HookID", pingEvent.GetHookID())) + s.Logger.WithField("HookID", pingEvent.GetHookID()).Info("ping event") case "pull_request": - event := PullRequestEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) + event, err := PullRequestEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) + if err != nil { + s.Logger.WithError(err).Error("Failed to parse pull request event") + } + // TODO: determine if we need to perform these event number checks or if + // they can be removed. 
if event != nil && event.GetNumber() != 0 { - mlog.Info("pr event", mlog.Int("pr", event.GetNumber()), mlog.String("action", event.GetAction())) + s.Logger.WithFields(logrus.Fields{ + "pr": event.GetNumber(), + "action": event.GetAction(), + }).Info("pr event") go s.handlePullRequestEvent(event) } case "issue_comment": - eventIssueEventComment := IssueCommentEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) + eventIssueEventComment, err := IssueCommentEventFromJSON(ioutil.NopCloser(bytes.NewBuffer(buf))) + if err != nil { + s.Logger.WithError(err).Error("Failed to parse issue comment event") + w.WriteHeader(http.StatusBadRequest) + return + } if !eventIssueEventComment.GetIssue().IsPullRequest() { // if not a pull request dont need to continue w.WriteHeader(http.StatusAccepted) return } @@ -161,7 +178,7 @@ } } default: - mlog.Info("Other Events") + s.Logger.Info("Other Events") w.WriteHeader(http.StatusNotImplemented) return } @@ -173,13 +190,16 @@ func (s *Server) handleCloudWebhook(w http.ResponseWriter, r *http.Request) { payload, err := cloudModel.WebhookPayloadFromReader(r.Body) if err != nil { - mlog.Error("Received webhook event, but couldn't parse the payload") + s.Logger.WithError(err).Error("Received webhook event, but couldn't parse the payload") return } defer r.Body.Close() payloadClone := *payload - mlog.Debug("Received cloud webhook payload", mlog.Int("channels", len(s.webhookChannels)), mlog.String("payload", fmt.Sprintf("%+v", payloadClone))) + s.Logger.WithFields(logrus.Fields{ + "channels": len(s.webhookChannels), + "payload": fmt.Sprintf("%+v", payloadClone), + }).Debug("Received cloud webhook payload") s.webhookChannelsLock.Lock() for _, channel := range s.webhookChannels { @@ -202,29 +222,3 @@ func messageByUserContains(comments []*github.IssueComment, username string, text string) bool { return false } - -// GetLogFileLocation gets the log file locations -func GetLogFileLocation(fileLocation string) string { - if fileLocation == "" { - fileLocation, _ = fileutils.FindDir("logs") - } - - return filepath.Join(fileLocation, logFilename) -} - -// SetupLogging sets the logging -func SetupLogging(config *MatterwickConfig) { - loggingConfig := &mlog.LoggerConfiguration{ - EnableConsole: config.LogSettings.EnableConsole, - ConsoleJson: config.LogSettings.ConsoleJSON, - ConsoleLevel: strings.ToLower(config.LogSettings.ConsoleLevel), - EnableFile: config.LogSettings.EnableFile, - FileJson: config.LogSettings.FileJSON, - FileLevel: strings.ToLower(config.LogSettings.FileLevel), - FileLocation: GetLogFileLocation(config.LogSettings.FileLocation), - } - - logger := mlog.NewLogger(loggingConfig) - mlog.RedirectStdLog(logger) - mlog.InitGlobalLogger(logger) -} diff --git a/server/spinwick.go b/server/spinwick.go index 736b7bd4..c6b18e23 100644 --- a/server/spinwick.go +++ b/server/spinwick.go @@ -8,26 +8,25 @@ import ( "encoding/base64" "fmt" "net" + "net/http" "net/url" "os" "strings" "text/template" "time" + "github.com/google/go-github/v32/github" cloudModel "github.com/mattermost/mattermost-cloud/model" - "github.com/mattermost/mattermost-server/v5/mlog" - mattermostModel "github.com/mattermost/mattermost-server/v5/model" + mattermostModel "github.com/mattermost/mattermost-server/v6/model" "github.com/mattermost/matterwick/internal/cloudtools" "github.com/mattermost/matterwick/internal/cws" "github.com/mattermost/matterwick/internal/spinwick" "github.com/mattermost/matterwick/model" - - 
"github.com/google/go-github/v32/github" "github.com/pkg/errors" + "github.com/sirupsen/logrus" // K8s packages for CWS "github.com/mattermost/mattermost-cloud/k8s" - log "github.com/sirupsen/logrus" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -46,10 +45,10 @@ const ( defaultMultiTenantAnnotation = "multi-tenant" ) -func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLicense bool, withCloudInfra bool) { - +func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLicense, withCloudInfra bool) { + logger := s.Logger.WithFields(logrus.Fields{"repo_name": pr.RepoName, "pr": pr.Number}) if pr.State == "closed" { - mlog.Info("PR is closed/merged, will not create a test server", mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number)) + logger.Info("PR is closed/merged, will not create a test server") s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "PR is closed/merged not creating a SpinWick Test server") return } @@ -62,7 +61,7 @@ func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLi } if pr.RepoName == cwsRepoName { s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "Creating a CWS SpinWick test server") - request = s.createCWSSpinWick(pr) + request = s.createCWSSpinWick(pr, logger) } else if withCloudInfra { s.sendGitHubComment( pr.RepoOwner, @@ -70,7 +69,7 @@ func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLi pr.Number, "Creating a new SpinWick test cloud server with CWS using Mattermost Cloud.", ) - request = s.createCloudSpinWickWithCWS(pr, size) + request = s.createCloudSpinWickWithCWS(pr, size, logger) } else { var commitMsg string if withLicense { @@ -79,20 +78,22 @@ func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLi commitMsg = "Creating a new SpinWick test server using Mattermost Cloud." 
} s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, commitMsg) - request = s.createSpinWick(pr, size, withLicense, nil) + request = s.createSpinWick(pr, size, withLicense, nil, logger) } + logger = logger.WithField("installation_id", request.InstallationID) + if request.Error != nil { if request.Aborted { - mlog.Warn("Aborted creation of SpinWick", mlog.String("abort_message", request.Error.Error()), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Warn("Aborted creation of SpinWick") } else { - mlog.Error("Failed to create SpinWick", mlog.Err(request.Error), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Error("Failed to create SpinWick") } comments, err := s.getComments(pr.RepoOwner, pr.RepoName, pr.Number) if err != nil { - mlog.Error("Error getting comments", mlog.Err(err)) + logger.WithError(err).Error("Error getting comments") } else { - s.removeOldComments(comments, pr) + s.removeOldComments(comments, pr, logger) } for _, label := range pr.Labels { if s.isSpinWickLabel(label) { @@ -105,7 +106,7 @@ func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLi additionalFields := map[string]string{ "Installation ID": request.InstallationID, } - s.logPrettyErrorToMattermost("[ SpinWick ] Creation Failed", pr, request.Error, additionalFields) + s.logPrettyErrorToMattermost("[ SpinWick ] Creation Failed", pr, request.Error, additionalFields, logger) } } @@ -113,7 +114,7 @@ func (s *Server) handleCreateSpinWick(pr *model.PullRequest, size string, withLi // createCloudSpinwickWithCWS will use the defined CWSCloudInstance to create a new user/customer and // instantiate a new MM cloud installation -func (s *Server) createCloudSpinWickWithCWS(pr *model.PullRequest, size string) *spinwick.Request { +func (s *Server) createCloudSpinWickWithCWS(pr *model.PullRequest, size string, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -170,7 +171,7 @@ func (s *Server) createCloudSpinWickWithCWS(pr *model.PullRequest, size string) image := mattermostEEImage ctx, cancel := context.WithTimeout(context.Background(), 45*time.Minute) defer cancel() - prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image) + prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image, logger) if errImage != nil { return request.WithError(errors.Wrap(errImage, "error waiting for the docker image. 
Aborting")).IntentionalAbort() } @@ -189,7 +190,7 @@ func (s *Server) createCloudSpinWickWithCWS(pr *model.PullRequest, size string) return request.WithError(errors.Wrap(err, "Error occurred whilst creating installation")).ShouldReportError() } request.InstallationID = createResponse.InstallationID - s.waitForInstallationStable(ctx, pr, request) + s.waitForInstallationStable(ctx, pr, request, logger) if request.Error != nil { return request.WithError(errors.Wrap(request.Error, "error waiting for installation to become stable")) } @@ -200,7 +201,7 @@ func (s *Server) createCloudSpinWickWithCWS(pr *model.PullRequest, size string) return request } -func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { +func (s *Server) createCWSSpinWick(pr *model.PullRequest, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -208,7 +209,6 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { Aborted: false, } - logger := log.WithField("PR", fmt.Sprintf("%s: #%d", pr.RepoName, pr.Number)) kc, err := s.newClient(logger) if err != nil { return request.WithError(errors.Wrap(err, "Error occurred while getting Kube Client")) @@ -233,7 +233,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { return request.WithError(errors.Wrap(errDocker, "unable to get docker registry client")).ShouldReportError() } - prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image) + prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image, logger) if errImage != nil { return request.WithError(errors.Wrap(errImage, "error waiting for the docker image. Aborting")).IntentionalAbort() } @@ -251,7 +251,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { template, err := template.ParseFiles("/matterwick/templates/cws/cws_deployment.tmpl") if err != nil { - mlog.Error("Error loading deployment template ", mlog.Err(err)) + logger.WithError(err).Error("Error loading deployment template ") } file, err := os.Create(deployment.DeployFilePath) @@ -261,7 +261,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { err = template.Execute(file, deployment) if err != nil { - mlog.Error("Error executing template ", mlog.Err(err)) + logger.WithError(err).Error("Error executing template ") } file.Close() @@ -277,9 +277,9 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { return request.WithError(errors.Wrap(err, "Error deploying from manifest template")).ShouldReportError() } - mlog.Info("Deployment created successfully. Cleanup complete") + logger.Info("Deployment created successfully. 
Cleanup complete") - lbURL, _ := waitForIPAssignment(kc, deployment.Namespace) + lbURL, _ := waitForIPAssignment(kc, deployment.Namespace, logger) headers := map[string]string{ "x-api-key": s.Config.AWSAPIKey, @@ -292,7 +292,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { }) if err != nil { - mlog.Error("Unable to create webhook", mlog.Err(err)) + logger.WithError(err).Error("Unable to create webhook") return request.WithError(errors.Wrap(err, "Error creating provisioner webhook")).ShouldReportError() } @@ -300,7 +300,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { secret, err := cwsClient.RegisterStripeWebhook(fmt.Sprintf("http://%s", lbURL), namespace.GetName()) if err != nil { - mlog.Error("Unable to register stripe webhook", mlog.Err(err)) + logger.WithError(err).Error("Unable to register stripe webhook") return request.WithError(errors.Wrap(err, "Error registering stripe webhook")).ShouldReportError() } @@ -314,7 +314,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { metav1.PatchOptions{}, ) if err != nil { - mlog.Error("Unable to update CWS_SITEURL or STRIPE_WEBHOOK_SIGNATURE_SECRET secret", mlog.Err(err)) + logger.WithError(err).Error("Unable to update CWS_SITEURL or STRIPE_WEBHOOK_SIGNATURE_SECRET secret") } else { // patch the deployment to force new pods that will be aware of the new secrets. _, err := kc.Clientset.AppsV1().Deployments(namespaceName).Patch( @@ -325,16 +325,16 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { metav1.PatchOptions{}, ) if err != nil { - mlog.Error("Unable to refresh the deployment", mlog.Err(err)) + logger.WithError(err).Error("Unable to refresh the deployment") } } comments, errComments := s.getComments(pr.RepoOwner, pr.RepoName, pr.Number) commentsToDelete := []string{"Creating a SpinWick test CWS", "Spinwick Kubernetes namespace"} if errComments != nil { - mlog.Error("pr_error", mlog.Err(err)) + logger.WithError(err).Error("pr_error") } else { - s.removeCommentsWithSpecificMessages(comments, commentsToDelete, pr) + s.removeCommentsWithSpecificMessages(comments, commentsToDelete, pr, logger) } spinwickURL := fmt.Sprintf("http://%s", lbURL) @@ -349,7 +349,7 @@ func (s *Server) createCWSSpinWick(pr *model.PullRequest) *spinwick.Request { // - no cloud installation found = installation is created // - cloud installation found = actual ID string and no error // - any errors = error is returned -func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense bool, envVars cloudModel.EnvVarMap) *spinwick.Request { +func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense bool, envVars cloudModel.EnvVarMap, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -372,12 +372,12 @@ func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense } comments, errComments := s.getComments(pr.RepoOwner, pr.RepoName, pr.Number) if errComments != nil { - mlog.Error("pr_error", mlog.Err(err)) + logger.WithError(err).Error("pr_error") } else { - s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr) + s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr, logger) } - mlog.Info("No SpinWick found for this PR. Creating a new one.") + logger.Info("No SpinWick found for this PR. 
Creating a new one.") ctx, cancel := context.WithTimeout(context.Background(), 45*time.Minute) defer cancel() @@ -391,37 +391,37 @@ func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense } // if is server or webapp then set version to the PR git commit hash if pr.RepoName == mattermostWebAppRepo { - mlog.Info("Waiting for docker image to set up SpinWick", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName)) + logger.Info("Waiting for docker image to set up SpinWick") // Waiting for Enterprise Image - prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image) + prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image, logger) if errImage != nil { return request.WithError(errors.Wrap(errImage, "error waiting for the docker image. Aborting")).IntentionalAbort() } version = s.Builds.getInstallationVersion(prNew) } else if pr.RepoName == mattermostServerRepo { - mlog.Info("Waiting for docker image to set up SpinWick", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName)) + logger.Info("Waiting for docker image to set up SpinWick") ctxEnterprise, cancelEnterprise := context.WithTimeout(context.Background(), 30*time.Minute) defer cancelEnterprise() // Waiting for Enterprise Image - prNew, errImage := s.Builds.waitForImage(ctxEnterprise, s, reg, pr, image) + prNew, errImage := s.Builds.waitForImage(ctxEnterprise, s, reg, pr, image, logger) if errImage != nil { if withLicense { s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "Enterprise Edition Image not available in the 30 minutes timeframe.\nPlease check if the EE Pipeline was triggered and if not please trigger and re-add the `Setup HA Cloud Test Server` again.") return request.WithError(errors.Wrap(errImage, "error waiting for the docker image. Aborting. 
Check if EE pipeline ran")).IntentionalAbort() } - mlog.Warn("Did not find the EE image, fallback to TE", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName), mlog.String("sha", pr.Sha)) + logger.WithField("sha", pr.Sha).Warn("Did not find the EE image, fallback to TE") s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "Enterprise Edition Image not available in the 30 minutes timeframe, checking the Team Edition Image and if available will use that.") //fallback to TE image = mattermostTeamImage ctxTeam, cancelTeam := context.WithTimeout(context.Background(), 30*time.Minute) defer cancelTeam() - prNew, errImage = s.Builds.waitForImage(ctxTeam, s, reg, pr, image) + prNew, errImage = s.Builds.waitForImage(ctxTeam, s, reg, pr, image, logger) if errImage != nil { - mlog.Warn("Did not find TE image", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName), mlog.String("sha", pr.Sha)) + logger.WithField("sha", pr.Sha).Warn("Did not find TE image") return request.WithError(errors.Wrap(errDocker, "unable to get docker registry client")).ShouldReportError() } } @@ -429,7 +429,7 @@ func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense version = s.Builds.getInstallationVersion(prNew) } - mlog.Info("Provisioning Server - Installation request") + logger.Info("Provisioning Server - Installation request") headers := map[string]string{ "x-api-key": s.Config.AWSAPIKey, @@ -478,20 +478,21 @@ func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense return request.WithError(errors.Wrap(err, "unable to make the installation creation request to the provisioning server")).ShouldReportError() } request.InstallationID = installation.ID - mlog.Info("Provisioner Server - installation request", mlog.String("InstallationID", request.InstallationID)) + logger = logger.WithField("installation_id", request.InstallationID) + logger.Info("Provisioner Server - installation request") wait := 1200 - mlog.Info("Waiting for mattermost installation to become stable", mlog.Int("wait_seconds", wait)) + logger.Info("Waiting %d seconds for mattermost installation to become stable") ctx, cancel = context.WithTimeout(context.Background(), time.Duration(wait)*time.Second) defer cancel() - s.waitForInstallationStable(ctx, pr, request) + s.waitForInstallationStable(ctx, pr, request, logger) if request.Error != nil { return request.WithError(errors.Wrap(request.Error, "error waiting for installation to become stable")) } spinwickURL := fmt.Sprintf("https://%s.%s", s.makeSpinWickID(pr.RepoName, pr.Number), s.Config.DNSNameTestServer) - err = s.initializeMattermostTestServer(spinwickURL, pr.Number) + err = s.initializeMattermostTestServer(spinwickURL, pr.Number, logger) if err != nil { return request.WithError(errors.Wrap(err, "failed to initialize the Installation")).ShouldReportError() } @@ -503,6 +504,8 @@ func (s *Server) createSpinWick(pr *model.PullRequest, size string, withLicense } func (s *Server) handleUpdateSpinWick(pr *model.PullRequest, withLicense, withCloudInfra bool) { + logger := s.Logger.WithFields(logrus.Fields{"repo_name": pr.RepoName, "pr": pr.Number}) + // other repos we are not updating request := &spinwick.Request{ InstallationID: "n/a", @@ -512,35 +515,36 @@ func (s *Server) handleUpdateSpinWick(pr *model.PullRequest, withLicense, withCl } if pr.RepoName == cwsRepoName { - request = s.updateKubeSpinWick(pr) + request = s.updateKubeSpinWick(pr, logger) } else 
{ - request = s.updateSpinWick(pr, withLicense, withCloudInfra) + request = s.updateSpinWick(pr, withLicense, withCloudInfra, logger) } + logger = logger.WithField("installation_id", request.InstallationID) + if request.Error != nil { if request.Aborted { - mlog.Warn("Aborted update of SpinWick", mlog.String("abort_message", request.Error.Error()), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Warn("Aborted update of SpinWick") } else { - mlog.Error("Failed to update SpinWick", mlog.Err(request.Error), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Error("Failed to update SpinWick") } s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, s.Config.SetupSpinmintFailedMessage) if request.ReportError { additionalFields := map[string]string{ "Installation ID": request.InstallationID, } - s.logPrettyErrorToMattermost("[ SpinWick ] Update Failed", pr, request.Error, additionalFields) + s.logPrettyErrorToMattermost("[ SpinWick ] Update Failed", pr, request.Error, additionalFields, logger) } } } -func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { +func (s *Server) updateKubeSpinWick(pr *model.PullRequest, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, ReportError: false, Aborted: false, } - logger := log.WithField("PR", fmt.Sprintf("%s: #%d", pr.RepoName, pr.Number)) kc, err := s.newClient(logger) if err != nil { @@ -566,9 +570,9 @@ func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { } comments, errComments := s.getComments(pr.RepoOwner, pr.RepoName, pr.Number) if errComments != nil { - mlog.Error("pr_error", mlog.Err(err)) + logger.WithError(errComments).Error("pr_error") } else { - s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr) + s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr, logger) } // Now that we know this namespace exists, notify via comment that we are attempting to upgrade the deployment s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "New commit detected. SpinWick will upgrade if the updated docker image is available.") @@ -584,7 +588,7 @@ func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { return request.WithError(errors.Wrap(errDocker, "unable to get docker registry client")).ShouldReportError() } - prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image) + prNew, errImage := s.Builds.waitForImage(ctx, s, reg, pr, image, logger) if errImage != nil { return request.WithError(errors.Wrap(errImage, "error waiting for the docker image.
Aborting")).IntentionalAbort() } @@ -594,7 +598,7 @@ func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { deployClient := kc.Clientset.AppsV1().Deployments(namespaceName) deployment, err := deployClient.Get(context.Background(), "cws-test", metav1.GetOptions{}) if err != nil && !k8sErrors.IsNotFound(err) { - mlog.Info("Attempted to update a deployment that does not exist") + logger.Warn("Attempted to update a deployment that does not exist") return request.WithError(errors.Wrap(err, "Attempted to update a deployment that does not exist")).ShouldReportError() } @@ -617,10 +621,10 @@ func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { serverUpdateMessage := []string{ "CWS test server updated", } - s.removeCommentsWithSpecificMessages(comments, serverUpdateMessage, pr) + s.removeCommentsWithSpecificMessages(comments, serverUpdateMessage, pr, logger) } - lbURL, _ := waitForIPAssignment(kc, namespaceName) + lbURL, _ := waitForIPAssignment(kc, namespaceName, logger) spinwickURL := fmt.Sprintf("http://%s", lbURL) msg := fmt.Sprintf("CWS test server updated with git commit `%s`.\n\nAccess here: %s", pr.Sha, spinwickURL) s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, msg) @@ -632,7 +636,7 @@ func (s *Server) updateKubeSpinWick(pr *model.PullRequest) *spinwick.Request { // - no cloud installation found = error is returned // - cloud installation found and updated = actual ID string and no error // - any errors = error is returned -func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInfra bool) *spinwick.Request { +func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInfra bool, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -660,7 +664,8 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf } request.InstallationID = installationID - mlog.Info("Sleeping a bit to wait for the build process to start", mlog.Int("pr", pr.Number), mlog.String("sha", pr.Sha)) + logger = logger.WithField("sha", pr.Sha) + logger.Info("Sleeping a bit to wait for the build process to start") time.Sleep(60 * time.Second) // Remove old message to reduce the amount of similar messages and avoid confusion @@ -669,9 +674,9 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf } comments, errComments := s.getComments(pr.RepoOwner, pr.RepoName, pr.Number) if errComments != nil { - mlog.Error("pr_error", mlog.Err(err)) + logger.WithError(err).Error("pr_error") } else { - s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr) + s.removeCommentsWithSpecificMessages(comments, serverNewCommitMessages, pr, logger) } s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "New commit detected. 
SpinWick will upgrade if the updated docker image is available.") @@ -680,12 +685,12 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf return request.WithError(errors.Wrap(err, "unable to get docker registry client")).ShouldReportError() } - mlog.Info("Waiting for docker image to update SpinWick", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName)) + logger.Info("Waiting for docker image to update SpinWick") ctx, cancel := context.WithTimeout(context.Background(), 45*time.Minute) defer cancel() - pr, err = s.Builds.waitForImage(ctx, s, reg, pr, image) + pr, err = s.Builds.waitForImage(ctx, s, reg, pr, image, logger) if err != nil { return request.WithError(errors.Wrap(err, "error waiting for the docker image. Aborting")).IntentionalAbort() } @@ -715,7 +720,7 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf return request.WithError(errors.New("another process already updated the installation version. Aborting")).IntentionalAbort() } - mlog.Info("Provisioning Server - Upgrade request", mlog.String("SHA", pr.Sha)) + logger.Info("Provisioning Server - Upgrade request") _, err = cloudClient.UpdateInstallation(request.InstallationID, upgradeRequest) if err != nil { @@ -723,11 +728,11 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf } wait := 600 - mlog.Info("Waiting for mattermost installation to become stable", mlog.Int("wait_seconds", wait)) + logger.Infof("Waiting %d seconds for mattermost installation to become stable", wait) ctx, cancel = context.WithTimeout(context.Background(), time.Duration(wait)*time.Second) defer cancel() - s.waitForInstallationStable(ctx, pr, request) + s.waitForInstallationStable(ctx, pr, request, logger) if request.Error != nil { return request.WithError(errors.Wrap(request.Error, "error waiting for installation to become stable")) } @@ -737,7 +742,7 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf serverUpdateMessage := []string{ "Mattermost test server updated", } - s.removeCommentsWithSpecificMessages(comments, serverUpdateMessage, pr) + s.removeCommentsWithSpecificMessages(comments, serverUpdateMessage, pr, logger) } mmURL := fmt.Sprintf("https://%s.%s", s.makeSpinWickID(pr.RepoName, pr.Number), s.Config.DNSNameTestServer) @@ -748,6 +753,8 @@ func (s *Server) updateSpinWick(pr *model.PullRequest, withLicense, withCloudInf } func (s *Server) handleDestroySpinWick(pr *model.PullRequest, withCloud bool) { + logger := s.Logger.WithFields(logrus.Fields{"repo_name": pr.RepoName, "pr": pr.Number}) + request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -756,30 +763,32 @@ func (s *Server) handleDestroySpinWick(pr *model.PullRequest, withCloud bool) { } if pr.RepoName == cwsRepoName { - request = s.destroyKubeSpinWick(pr) + request = s.destroyKubeSpinWick(pr, logger) } else if withCloud { - request = s.destroyCloudSpinWickWithCWS(pr) + request = s.destroyCloudSpinWickWithCWS(pr, logger) } else { - request = s.destroySpinWick(pr) + request = s.destroySpinWick(pr, logger) } + logger = logger.WithField("installation_id", request.InstallationID) + if request.Error != nil { if request.Aborted { - mlog.Warn("Aborted deletion of SpinWick", mlog.String("abort_message", request.Error.Error()), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Warn("Aborted deletion of 
SpinWick") } else { - mlog.Error("Failed to delete SpinWick", mlog.Err(request.Error), mlog.String("repo_name", pr.RepoName), mlog.Int("pr", pr.Number), mlog.String("installation_id", request.InstallationID)) + logger.WithError(request.Error).Error("Failed to delete SpinWick") } if request.ReportError { additionalFields := map[string]string{ "Installation ID": request.InstallationID, } - s.logPrettyErrorToMattermost("[ SpinWick ] Destroy Failed", pr, request.Error, additionalFields) + s.logPrettyErrorToMattermost("[ SpinWick ] Destroy Failed", pr, request.Error, additionalFields, logger) } } } -func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { - mlog.Info("Received request to destroy kubernetes namespace") +func (s *Server) destroyKubeSpinWick(pr *model.PullRequest, logger logrus.FieldLogger) *spinwick.Request { + logger.Info("Received request to destroy kubernetes namespace") request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -787,8 +796,6 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { Aborted: false, } - logger := log.WithField("PR", fmt.Sprintf("%s: #%d", pr.RepoName, pr.Number)) - namespaceName := s.makeSpinWickID(pr.RepoName, pr.Number) kc, err := s.newClient(logger) @@ -808,12 +815,12 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { err = deleteNamespace(kc, namespaceName) if err != nil { - mlog.Error("Failed while deleting namespace", mlog.Err(err)) + logger.WithError(err).Error("Failed while deleting namespace") request.Error = err return request } request.InstallationID = namespaceName - mlog.Info("Kube namespace " + namespaceName + " has been destroyed") + logger.Infof("Kube namespace %s has been destroyed", namespaceName) headers := map[string]string{ "x-api-key": s.Config.AWSAPIKey, @@ -823,7 +830,7 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { OwnerID: namespaceName, }) if err != nil { - mlog.Error("Failed to get webhooks for spinwick", mlog.Err(err)) + logger.WithError(err).Error("Failed to get webhooks for spinwick") request.Error = err return request } @@ -831,7 +838,7 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { for _, webhook := range webhooks { err = cloudClient.DeleteWebhook(webhook.ID) if err != nil { - mlog.Error("Failed to delete webhook", mlog.Err(err)) + logger.WithError(err).Error("Failed to delete webhook") request.Error = err return request } @@ -840,7 +847,7 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { cwsClient := cws.NewClient(s.Config.CWSPublicAPIAddress, s.Config.CWSInternalAPIAddress, s.Config.CWSAPIKey) err = cwsClient.DeleteStripeWebhook(namespaceName) if err != nil { - mlog.Error("Failed to delete stripe webhook", mlog.Err(err)) + logger.WithError(err).Error("Failed to delete stripe webhook") request.Error = err return request } @@ -852,7 +859,7 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { if err != nil { return request.WithError(errors.Wrap(err, "unable to get list of old comments")).ShouldReportError() } - s.removeOldComments(comments, pr) + s.removeOldComments(comments, pr, logger) s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, "Spinwick CWS test server has been destroyed.") return request } @@ -860,7 +867,7 @@ func (s *Server) destroyKubeSpinWick(pr *model.PullRequest) *spinwick.Request { // destroyCloudSpinWickWithCWS destroys the Spinwick installation for the passed PR // 
using CWS so we can get rid of the installation but also for all the intermediate // metadata -func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest) *spinwick.Request { +func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -888,7 +895,7 @@ func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest) *spinwick.Re request.InstallationID = installation.ID - mlog.Info("Found installation. Starting deletion...", mlog.String("id", installation.ID)) + logger.WithField("installation_id", installation.ID).Info("Found installation. Starting deletion...") err = cwsClient.DeleteInstallation(installation.ID) if err != nil { return request.WithInstallationID(installation.ID). @@ -898,7 +905,7 @@ func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest) *spinwick.Re ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) defer cancel() - s.waitForInstallationIsDeleted(ctx, pr, request) + s.waitForInstallationIsDeleted(ctx, pr, request, logger) // Old comments created by MatterWick user will be deleted here. s.commentLock.Lock() @@ -907,7 +914,7 @@ func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest) *spinwick.Re if err != nil { return request.WithError(errors.Wrap(err, "unable to get list of old comments")).ShouldReportError() } - s.removeOldComments(comments, pr) + s.removeOldComments(comments, pr, logger) s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, s.Config.DestroyedSpinmintMessage) return request } @@ -916,7 +923,7 @@ func (s *Server) destroyCloudSpinWickWithCWS(pr *model.PullRequest) *spinwick.Re // - no cloud installation found = empty ID string and no error // - cloud installation found and deleted = actual ID string and no error // - any errors = error is returned -func (s *Server) destroySpinWick(pr *model.PullRequest) *spinwick.Request { +func (s *Server) destroySpinWick(pr *model.PullRequest, logger logrus.FieldLogger) *spinwick.Request { request := &spinwick.Request{ InstallationID: "n/a", Error: nil, @@ -934,7 +941,7 @@ func (s *Server) destroySpinWick(pr *model.PullRequest) *spinwick.Request { } request.InstallationID = id - mlog.Info("Destroying SpinWick", mlog.Int("pr", pr.Number), mlog.String("repo_owner", pr.RepoOwner), mlog.String("repo_name", pr.RepoName), mlog.String("installation_id", request.InstallationID)) + logger.WithField("installation_id", request.InstallationID).Info("Destroying SpinWick") headers := map[string]string{ "x-api-key": s.Config.AWSAPIKey, @@ -953,14 +960,14 @@ func (s *Server) destroySpinWick(pr *model.PullRequest) *spinwick.Request { if err != nil { return request.WithError(errors.Wrap(err, "unable to get list of old comments")).ShouldReportError() } - s.removeOldComments(comments, pr) + s.removeOldComments(comments, pr, logger) s.sendGitHubComment(pr.RepoOwner, pr.RepoName, pr.Number, s.Config.DestroyedSpinmintMessage) return request } -func (s *Server) waitForInstallationStable(ctx context.Context, pr *model.PullRequest, request *spinwick.Request) { +func (s *Server) waitForInstallationStable(ctx context.Context, pr *model.PullRequest, request *spinwick.Request, logger logrus.FieldLogger) { channel, err := s.requestCloudWebhookChannel(request.InstallationID) if err != nil { request.WithError(err).ShouldReportError() @@ -978,7 +985,10 @@ func (s *Server) waitForInstallationStable(ctx context.Context, pr *model.PullRe continue } - mlog.Info("Installation 
changed state", mlog.String("installation", request.InstallationID), mlog.String("state", payload.NewState)) + logger.WithFields(logrus.Fields{ + "installation_id": request.InstallationID, + "state": payload.NewState, + }).Info("Installation changed state") switch payload.NewState { case cloudModel.InstallationStateStable: @@ -1012,7 +1022,7 @@ func (s *Server) waitForInstallationStable(ctx context.Context, pr *model.PullRe } } -func (s *Server) waitForInstallationIsDeleted(ctx context.Context, pr *model.PullRequest, request *spinwick.Request) { +func (s *Server) waitForInstallationIsDeleted(ctx context.Context, pr *model.PullRequest, request *spinwick.Request, logger logrus.FieldLogger) { channel, err := s.requestCloudWebhookChannel(request.InstallationID) if err != nil { request.WithError(err).ShouldReportError() @@ -1030,9 +1040,10 @@ func (s *Server) waitForInstallationIsDeleted(ctx context.Context, pr *model.Pul continue } - mlog.Info("Installation changed state", - mlog.String("installation", request.InstallationID), - mlog.String("state", payload.NewState)) + logger.WithFields(logrus.Fields{ + "installation_id": request.InstallationID, + "state": payload.NewState, + }).Info("Installation changed state") switch payload.NewState { case cloudModel.InstallationStateDeleted: @@ -1045,11 +1056,11 @@ func (s *Server) waitForInstallationIsDeleted(ctx context.Context, pr *model.Pul } } -func (s *Server) initializeMattermostTestServer(mmURL string, prNumber int) error { - mlog.Info("Initializing Mattermost installation") +func (s *Server) initializeMattermostTestServer(mmURL string, prNumber int, logger logrus.FieldLogger) error { + logger.Info("Initializing Mattermost installation") wait := 600 - mlog.Info("Waiting up to 600 seconds for DNS to propagate") + logger.Infof("Waiting up to %d seconds for DNS to propagate", wait) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(wait)*time.Second) defer cancel() @@ -1074,15 +1085,15 @@ func (s *Server) initializeMattermostTestServer(mmURL string, prNumber int) erro Email: "sysadmin@example.mattermost.com", Password: "Sys@dmin123", } - _, response := client.CreateUser(user) - if response.StatusCode != 201 { - return fmt.Errorf("error creating the initial mattermost user: status code = %d, message = %s", response.StatusCode, response.Error.Message) + _, _, err = client.CreateUser(user) + if err != nil { + return errors.Wrap(err, "failed to create initial mattermost user") } - client.Logout() - userLogged, response := client.Login("sysadmin", "Sys@dmin123") - if response.StatusCode != 200 { - return fmt.Errorf("error logging in with initial mattermost user: status code = %d, message = %s", response.StatusCode, response.Error.Message) + + userLogged, _, err := client.Login("sysadmin", "Sys@dmin123") + if err != nil { + return errors.Wrap(err, "failed to log in with initial mattermost user") } teamName := fmt.Sprintf("pr%d", prNumber) @@ -1091,14 +1102,14 @@ func (s *Server) initializeMattermostTestServer(mmURL string, prNumber int) erro DisplayName: teamName, Type: "O", } - firstTeam, response := client.CreateTeam(team) - if response.StatusCode != 201 { - return fmt.Errorf("error creating the initial team: status code = %d, message = %s", response.StatusCode, response.Error.Message) + firstTeam, _, err := client.CreateTeam(team) + if err != nil { + return errors.Wrap(err, "failed to log in with initial team") } - _, response = client.AddTeamMember(firstTeam.Id, userLogged.Id) - if response.StatusCode != 201 { - return 
fmt.Errorf("error adding sysadmin to the initial team: status code = %d, message = %s", response.StatusCode, response.Error.Message) + _, _, err = client.AddTeamMember(firstTeam.Id, userLogged.Id) + if err != nil { + return errors.Wrap(err, "failed adding admin user to initial team") } testUser := &mattermostModel.User{ @@ -1106,16 +1117,16 @@ func (s *Server) initializeMattermostTestServer(mmURL string, prNumber int) erro Email: "user-1@example.mattermost.com", Password: "User-1@123", } - testUser, response = client.CreateUser(testUser) - if response.StatusCode != 201 { - return fmt.Errorf("error creating the standard test user: status code = %d, message = %s", response.StatusCode, response.Error.Message) + testUser, _, err = client.CreateUser(testUser) + if err != nil { + return errors.Wrap(err, "failed to create standard test user") } - _, response = client.AddTeamMember(firstTeam.Id, testUser.Id) - if response.StatusCode != 201 { - return fmt.Errorf("error adding standard test user to the initial team: status code = %d, message = %s", response.StatusCode, response.Error.Message) + _, _, err = client.AddTeamMember(firstTeam.Id, testUser.Id) + if err != nil { + return errors.Wrap(err, "failed adding standard test user to initial team") } - mlog.Info("Mattermost configuration complete") + logger.Info("Mattermost configuration complete") return nil } @@ -1138,8 +1149,8 @@ func checkDNS(ctx context.Context, url string) error { func checkMMPing(ctx context.Context, client *mattermostModel.Client4) error { for { - status, response := client.GetPing() - if response.StatusCode == 200 && status == "OK" { + _, response, _ := client.GetPing() + if response.StatusCode == http.StatusOK { return nil } @@ -1215,16 +1226,16 @@ func (s *Server) isSpinWickCloudWithCWSLabel(labels []string) bool { return false } -func (s *Server) removeCommentsWithSpecificMessages(comments []*github.IssueComment, serverMessages []string, pr *model.PullRequest) { - mlog.Info("Removing old spinwick MatterWick comments") +func (s *Server) removeCommentsWithSpecificMessages(comments []*github.IssueComment, serverMessages []string, pr *model.PullRequest, logger logrus.FieldLogger) { + logger.Info("Removing old spinwick MatterWick comments") for _, comment := range comments { if *comment.User.Login == s.Config.Username { for _, message := range serverMessages { if strings.Contains(*comment.Body, message) { - mlog.Info("Removing old spinwick comment with ID", mlog.Int64("ID", *comment.ID)) + logger.WithField("comment_id", *comment.ID).Info("Removing old spinwick comment with ID") _, err := newGithubClient(s.Config.GithubAccessToken).Issues.DeleteComment(context.Background(), pr.RepoOwner, pr.RepoName, *comment.ID) if err != nil { - mlog.Error("Unable to remove old spinwick MatterWick comment", mlog.Err(err)) + logger.WithError(err).Error("Unable to remove old spinwick MatterWick comment") } break } diff --git a/server/utils.go b/server/utils.go index 027e250e..f91caeec 100644 --- a/server/utils.go +++ b/server/utils.go @@ -3,18 +3,18 @@ package server import ( "fmt" - "github.com/mattermost/mattermost-server/v5/mlog" "github.com/mattermost/matterwick/model" + "github.com/sirupsen/logrus" ) func (s *Server) logErrorToMattermost(msg string, args ...interface{}) { if s.Config.MattermostWebhookURL == "" { - mlog.Warn("No Mattermost webhook URL set: unable to send message") + s.Logger.Warn("No Mattermost webhook URL set: unable to send message") return } webhookMessage := fmt.Sprintf(msg, args...) 
- mlog.Debug("Sending Mattermost message", mlog.String("message", webhookMessage)) + s.Logger.WithField("message", webhookMessage).Debug("Sending Mattermost message") if s.Config.MattermostWebhookFooter != "" { webhookMessage += "\n---\n" + s.Config.MattermostWebhookFooter @@ -23,17 +23,17 @@ func (s *Server) logErrorToMattermost(msg string, args ...interface{}) { webhookRequest := &WebhookRequest{Username: "MatterWick", Text: webhookMessage} if err := s.sendToWebhook(webhookRequest); err != nil { - mlog.Error("Unable to post to Mattermost webhook", mlog.Err(err)) + s.Logger.WithError(err).Error("Unable to post to Mattermost webhook") } } -func (s *Server) logPrettyErrorToMattermost(msg string, pr *model.PullRequest, err error, additionalFields map[string]string) { +func (s *Server) logPrettyErrorToMattermost(msg string, pr *model.PullRequest, err error, additionalFields map[string]string, logger logrus.FieldLogger) { if s.Config.MattermostWebhookURL == "" { - mlog.Warn("No Mattermost webhook URL set: unable to send message") + logger.Warn("No Mattermost webhook URL set: unable to send message") return } - mlog.Debug("Sending Mattermost message", mlog.String("message", msg)) + logger.WithField("message", msg).Debug("Sending Mattermost message") fullMessage := fmt.Sprintf("%s\n---\nError: %s\nRepository: %s/%s\nPull Request: %d [ status=%s ]\nURL: %s\n", msg, @@ -50,7 +50,7 @@ func (s *Server) logPrettyErrorToMattermost(msg string, pr *model.PullRequest, e webhookRequest := &WebhookRequest{Username: "MatterWick", Text: fullMessage} if err := s.sendToWebhook(webhookRequest); err != nil { - mlog.Error("Unable to post to Mattermost webhook", mlog.Err(err)) + logger.WithError(err).Error("Unable to post to Mattermost webhook") } } diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 00000000..ba95cdd1 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go: + - 1.3.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/github.com/dustin/go-humanize/LICENSE similarity index 88% rename from vendor/go.uber.org/multierr/LICENSE.txt rename to vendor/github.com/dustin/go-humanize/LICENSE index 413e30f7..8d9a94a9 100644 --- a/vendor/go.uber.org/multierr/LICENSE.txt +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2017-2021 Uber Technologies, Inc. +Copyright (c) 2005-2008 Dustin Sallings Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -15,5 +15,7 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 00000000..91b4ae56 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. 
+ +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 00000000..f49dc337 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 00000000..1a2bf617 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,173 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + 
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 00000000..0b498f48 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. 
+const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 00000000..520ae3e5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. 
+ if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. +func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 00000000..620690de --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,40 @@ +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. 
+func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 00000000..1c62b640 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,46 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 00000000..a2c2da31 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). +*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 00000000..dec61865 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." 
=> "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < -math.MaxFloat64 { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." + thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. 
+func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 00000000..43d88a86 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. +// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 00000000..ae659e0e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,123 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 00000000..dd3fbf5e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. 
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index dc2b7e51..4bce5936 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - con, key := findObject(doc, path) if con == nil { @@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - con, key := findObject(doc, path) if con == nil { diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f..ab593118 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. 
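The verbosity convention described in the go-logr README above can be made concrete with a tiny sketch. This assumes the stdr adapter from the list that follows as the backend; the threshold and messages are illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// stdr bridges logr to the standard library logger.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
	stdr.SetVerbosity(1) // globally enable V(1) and below

	logger.Info("always emitted", "key", "value") // implicit V(0)
	logger.V(1).Info("emitted because verbosity >= 1")
	logger.V(2).Info("suppressed at verbosity 1")
}
```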
There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c05482a2..c3b56b3d 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -115,6 +115,15 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. 
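A short sketch of the optional-logger guidance above; the `worker` package and `Run` function are illustrative names only:

```go
package worker // hypothetical package, for illustration

import "github.com/go-logr/logr"

// Run treats its logger as optional: a nil *logr.Logger means "no logging".
// Passing a zero-valued logr.Logger{} instead would crash on first use,
// because it carries no LogSink.
func Run(jobs []string, log *logr.Logger) {
	for _, j := range jobs {
		if log != nil {
			log.Info("running job", "name", j)
		}
		// ... do the work ...
	}
}
```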
+// // Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but diff --git a/vendor/github.com/graph-gophers/graphql-go/.gitignore b/vendor/github.com/graph-gophers/graphql-go/.gitignore new file mode 100644 index 00000000..2fa95abe --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/.gitignore @@ -0,0 +1,5 @@ +/.idea +/.vscode +/internal/validation/testdata/graphql-js +/internal/validation/testdata/node_modules +/vendor diff --git a/vendor/github.com/graph-gophers/graphql-go/.golangci.yml b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml new file mode 100644 index 00000000..c6741d58 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml @@ -0,0 +1,35 @@ +run: + timeout: 5m + +linters-settings: + gofmt: + simplify: true + govet: + check-shadowing: true + enable-all: true + disable: + - fieldalignment + - deepequalerrors # remove later + +linters: + disable-all: true + enable: + - deadcode + - gofmt + - gosimple + - govet + - ineffassign + - exportloopref + - structcheck + - staticcheck + - unconvert + - unused + - varcheck + - misspell + - goimports + +issues: + exclude-rules: + - linters: + - unused + path: "graphql_test.go" \ No newline at end of file diff --git a/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md new file mode 100644 index 00000000..e5f48c06 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md @@ -0,0 +1,10 @@ +CHANGELOG + +[v1.1.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.1.0) Release v1.1.0 +* [FEATURE] Add types package #437 +* [FEATURE] Expose `packer.Unmarshaler` as `decode.Unmarshaler` to the public #450 +* [FEATURE] Add location fields to type definitions #454 +* [FEATURE] `errors.Errorf` preserves original error similar to `fmt.Errorf` #456 +* [BUGFIX] Fix duplicated __typename in response (fixes #369) #443 + +[v1.0.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.0.0) Initial release diff --git a/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md new file mode 100644 index 00000000..a2cffca8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integrations systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. \ No newline at end of file diff --git a/vendor/github.com/graph-gophers/graphql-go/LICENSE b/vendor/github.com/graph-gophers/graphql-go/LICENSE new file mode 100644 index 00000000..3907ceca --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Richard Musiol. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/graph-gophers/graphql-go/README.md b/vendor/github.com/graph-gophers/graphql-go/README.md new file mode 100644 index 00000000..87b020c1 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/README.md @@ -0,0 +1,169 @@ +# graphql-go [![Sourcegraph](https://sourcegraph.com/github.com/graph-gophers/graphql-go/-/badge.svg)](https://sourcegraph.com/github.com/graph-gophers/graphql-go?badge) [![Build Status](https://graph-gophers.semaphoreci.com/badges/graphql-go/branches/master.svg?style=shields)](https://graph-gophers.semaphoreci.com/projects/graphql-go) [![GoDoc](https://godoc.org/github.com/graph-gophers/graphql-go?status.svg)](https://godoc.org/github.com/graph-gophers/graphql-go) + +
+ +The goal of this project is to provide full support of the [GraphQL draft specification](https://facebook.github.io/graphql/draft) with a set of idiomatic, easy to use Go packages. + +While still under heavy development (`internal` APIs are almost certainly subject to change), this library is +safe for production use. + +## Features + +- minimal API +- support for `context.Context` +- support for the `OpenTracing` standard +- schema type-checking against resolvers +- resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct). +- handles panics in resolvers +- parallel execution of resolvers +- subscriptions + - [sample WS transport](https://github.com/graph-gophers/graphql-transport-ws) + +## Roadmap + +We're trying out the GitHub Project feature to manage `graphql-go`'s [development roadmap](https://github.com/graph-gophers/graphql-go/projects/1). +Feedback is welcome and appreciated. + +## (Some) Documentation + +### Basic Sample + +```go +package main + +import ( + "log" + "net/http" + + graphql "github.com/graph-gophers/graphql-go" + "github.com/graph-gophers/graphql-go/relay" +) + +type query struct{} + +func (_ *query) Hello() string { return "Hello, world!" } + +func main() { + s := ` + type Query { + hello: String! + } + ` + schema := graphql.MustParseSchema(s, &query{}) + http.Handle("/query", &relay.Handler{Schema: schema}) + log.Fatal(http.ListenAndServe(":8080", nil)) +} +``` + +To test: + +```sh +curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query +``` + +### Resolvers + +A resolver must have one method or field for each field of the GraphQL type it resolves. The method or field name has to be [exported](https://golang.org/ref/spec#Exported_identifiers) and match the schema's field's name in a non-case-sensitive way. +You can use struct fields as resolvers by using `SchemaOpt: UseFieldResolvers()`. For example, +``` +opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()} +schema := graphql.MustParseSchema(s, &query{}, opts...) +``` + +When using `UseFieldResolvers` schema option, a struct field will be used *only* when: +- there is no method for a struct field +- a struct field does not implement an interface method +- a struct field does not have arguments + +The method has up to two arguments: + +- Optional `context.Context` argument. +- Mandatory `*struct { ... }` argument if the corresponding GraphQL field has arguments. The names of the struct fields have to be [exported](https://golang.org/ref/spec#Exported_identifiers) and have to match the names of the GraphQL arguments in a non-case-sensitive way. + +The method has up to two results: + +- The GraphQL field's value as determined by the resolver. +- Optional `error` result. + +Example for a simple resolver method: + +```go +func (r *helloWorldResolver) Hello() string { + return "Hello world!" +} +``` + +The following signature is also allowed: + +```go +func (r *helloWorldResolver) Hello(ctx context.Context) (string, error) { + return "Hello world!", nil +} +``` + +### Schema Options + +- `UseStringDescriptions()` enables the usage of double quoted and triple quoted. When this is not enabled, comments are parsed as descriptions instead. +- `UseFieldResolvers()` specifies whether to use struct field resolvers. +- `MaxDepth(n int)` specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking. +- `MaxParallelism(n int)` specifies the maximum number of resolvers per request allowed to run in parallel. 
The default is 10. +- `Tracer(tracer trace.Tracer)` is used to trace queries and fields. It defaults to `trace.OpenTracingTracer`. +- `ValidationTracer(tracer trace.ValidationTracer)` is used to trace validation errors. It defaults to `trace.NoopValidationTracer`. +- `Logger(logger log.Logger)` is used to log panics during query execution. It defaults to `exec.DefaultLogger`. +- `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`. +- `DisableIntrospection()` disables introspection queries. + +### Custom Errors + +Errors returned by resolvers can include custom extensions by implementing the `ResolverError` interface: + +```go +type ResolverError interface { + error + Extensions() map[string]interface{} +} +``` + +Example of a simple custom error: + +```go +type droidNotFoundError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (e droidNotFoundError) Error() string { + return fmt.Sprintf("error [%s]: %s", e.Code, e.Message) +} + +func (e droidNotFoundError) Extensions() map[string]interface{} { + return map[string]interface{}{ + "code": e.Code, + "message": e.Message, + } +} +``` + +Which could produce a GraphQL error such as: + +```go +{ + "errors": [ + { + "message": "error [NotFound]: This is not the droid you are looking for", + "path": [ + "droid" + ], + "extensions": { + "code": "NotFound", + "message": "This is not the droid you are looking for" + } + } + ], + "data": null +} +``` + +### [Examples](https://github.com/graph-gophers/graphql-go/wiki/Examples) + +### [Companies that use this library](https://github.com/graph-gophers/graphql-go/wiki/Users) diff --git a/vendor/github.com/graph-gophers/graphql-go/decode/decode.go b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go new file mode 100644 index 00000000..56a9d5b5 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go @@ -0,0 +1,13 @@ +package decode + +// Unmarshaler defines the api of Go types mapped to custom GraphQL scalar types +type Unmarshaler interface { + // ImplementsGraphQLType maps the implementing custom Go type + // to the GraphQL scalar type in the schema. 
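+	// For example, a Go type backing a hypothetical custom "Time" scalar
+	// would report name == "Time".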
+ ImplementsGraphQLType(name string) bool + // UnmarshalGraphQL is the custom unmarshaler for the implementing type + // + // This function will be called whenever you use the + // custom GraphQL scalar type as an input + UnmarshalGraphQL(input interface{}) error +} diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go new file mode 100644 index 00000000..0f9340b1 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go @@ -0,0 +1,59 @@ +package errors + +import ( + "fmt" +) + +type QueryError struct { + Err error `json:"-"` // Err holds underlying if available + Message string `json:"message"` + Locations []Location `json:"locations,omitempty"` + Path []interface{} `json:"path,omitempty"` + Rule string `json:"-"` + ResolverError error `json:"-"` + Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +type Location struct { + Line int `json:"line"` + Column int `json:"column"` +} + +func (a Location) Before(b Location) bool { + return a.Line < b.Line || (a.Line == b.Line && a.Column < b.Column) +} + +func Errorf(format string, a ...interface{}) *QueryError { + // similar to fmt.Errorf, Errorf will wrap the last argument if it is an instance of error + var err error + if n := len(a); n > 0 { + if v, ok := a[n-1].(error); ok { + err = v + } + } + + return &QueryError{ + Err: err, + Message: fmt.Sprintf(format, a...), + } +} + +func (err *QueryError) Error() string { + if err == nil { + return "" + } + str := fmt.Sprintf("graphql: %s", err.Message) + for _, loc := range err.Locations { + str += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column) + } + return str +} + +func (err *QueryError) Unwrap() error { + if err == nil { + return nil + } + return err.Err +} + +var _ error = &QueryError{} diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go new file mode 100644 index 00000000..5446c2a9 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go @@ -0,0 +1,18 @@ +package errors + +import ( + "context" +) + +// PanicHandler is the interface used to create custom panic errors that occur during query execution +type PanicHandler interface { + MakePanicError(ctx context.Context, value interface{}) *QueryError +} + +// DefaultPanicHandler is the default PanicHandler +type DefaultPanicHandler struct{} + +// MakePanicError creates a new QueryError from a panic that occurred during execution +func (h *DefaultPanicHandler) MakePanicError(ctx context.Context, value interface{}) *QueryError { + return Errorf("panic occurred: %v", value) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/graphql.go b/vendor/github.com/graph-gophers/graphql-go/graphql.go new file mode 100644 index 00000000..76a6434d --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/graphql.go @@ -0,0 +1,339 @@ +package graphql + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/internal/exec" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/internal/schema" + "github.com/graph-gophers/graphql-go/internal/validation" + 
"github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/log" + "github.com/graph-gophers/graphql-go/trace" + "github.com/graph-gophers/graphql-go/types" +) + +// ParseSchema parses a GraphQL schema and attaches the given root resolver. It returns an error if +// the Go type signature of the resolvers does not match the schema. If nil is passed as the +// resolver, then the schema can not be executed, but it may be inspected (e.g. with ToJSON). +func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) (*Schema, error) { + s := &Schema{ + schema: schema.New(), + maxParallelism: 10, + tracer: trace.OpenTracingTracer{}, + logger: &log.DefaultLogger{}, + panicHandler: &errors.DefaultPanicHandler{}, + } + for _, opt := range opts { + opt(s) + } + + if s.validationTracer == nil { + if tracer, ok := s.tracer.(trace.ValidationTracerContext); ok { + s.validationTracer = tracer + } else { + s.validationTracer = &validationBridgingTracer{tracer: trace.NoopValidationTracer{}} + } + } + + if err := schema.Parse(s.schema, schemaString, s.useStringDescriptions); err != nil { + return nil, err + } + if err := s.validateSchema(); err != nil { + return nil, err + } + + r, err := resolvable.ApplyResolver(s.schema, resolver) + if err != nil { + return nil, err + } + s.res = r + + return s, nil +} + +// MustParseSchema calls ParseSchema and panics on error. +func MustParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) *Schema { + s, err := ParseSchema(schemaString, resolver, opts...) + if err != nil { + panic(err) + } + return s +} + +// Schema represents a GraphQL schema with an optional resolver. +type Schema struct { + schema *types.Schema + res *resolvable.Schema + + maxDepth int + maxParallelism int + tracer trace.Tracer + validationTracer trace.ValidationTracerContext + logger log.Logger + panicHandler errors.PanicHandler + useStringDescriptions bool + disableIntrospection bool + subscribeResolverTimeout time.Duration +} + +func (s *Schema) ASTSchema() *types.Schema { + return s.schema +} + +// SchemaOpt is an option to pass to ParseSchema or MustParseSchema. +type SchemaOpt func(*Schema) + +// UseStringDescriptions enables the usage of double quoted and triple quoted +// strings as descriptions as per the June 2018 spec +// https://facebook.github.io/graphql/June2018/. When this is not enabled, +// comments are parsed as descriptions instead. +func UseStringDescriptions() SchemaOpt { + return func(s *Schema) { + s.useStringDescriptions = true + } +} + +// UseFieldResolvers specifies whether to use struct field resolvers +func UseFieldResolvers() SchemaOpt { + return func(s *Schema) { + s.schema.UseFieldResolvers = true + } +} + +// MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking. +func MaxDepth(n int) SchemaOpt { + return func(s *Schema) { + s.maxDepth = n + } +} + +// MaxParallelism specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10. +func MaxParallelism(n int) SchemaOpt { + return func(s *Schema) { + s.maxParallelism = n + } +} + +// Tracer is used to trace queries and fields. It defaults to trace.OpenTracingTracer. +func Tracer(tracer trace.Tracer) SchemaOpt { + return func(s *Schema) { + s.tracer = tracer + } +} + +// ValidationTracer is used to trace validation errors. It defaults to trace.NoopValidationTracer. +// Deprecated: context is needed to support tracing correctly. 
Use a Tracer which implements trace.ValidationTracerContext. +func ValidationTracer(tracer trace.ValidationTracer) SchemaOpt { //nolint:staticcheck + return func(s *Schema) { + s.validationTracer = &validationBridgingTracer{tracer: tracer} + } +} + +// Logger is used to log panics during query execution. It defaults to exec.DefaultLogger. +func Logger(logger log.Logger) SchemaOpt { + return func(s *Schema) { + s.logger = logger + } +} + +// PanicHandler is used to customize the panic errors during query execution. +// It defaults to errors.DefaultPanicHandler. +func PanicHandler(panicHandler errors.PanicHandler) SchemaOpt { + return func(s *Schema) { + s.panicHandler = panicHandler + } +} + +// DisableIntrospection disables introspection queries. +func DisableIntrospection() SchemaOpt { + return func(s *Schema) { + s.disableIntrospection = true + } +} + +// SubscribeResolverTimeout is an option to control the amount of time +// we allow for a single subscribe message resolver to complete it's job +// before it times out and returns an error to the subscriber. +func SubscribeResolverTimeout(timeout time.Duration) SchemaOpt { + return func(s *Schema) { + s.subscribeResolverTimeout = timeout + } +} + +// Response represents a typical response of a GraphQL server. It may be encoded to JSON directly or +// it may be further processed to a custom response type, for example to include custom error data. +// Errors are intentionally serialized first based on the advice in https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107 +type Response struct { + Errors []*errors.QueryError `json:"errors,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +// Validate validates the given query with the schema. +func (s *Schema) Validate(queryString string) []*errors.QueryError { + return s.ValidateWithVariables(queryString, nil) +} + +// ValidateWithVariables validates the given query with the schema and the input variables. +func (s *Schema) ValidateWithVariables(queryString string, variables map[string]interface{}) []*errors.QueryError { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return []*errors.QueryError{qErr} + } + + return validation.Validate(s.schema, doc, variables, s.maxDepth) +} + +// Exec executes the given query with the schema's resolver. It panics if the schema was created +// without a resolver. If the context get cancelled, no further resolvers will be called and a +// the context error will be returned as soon as possible (not immediately). 
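A self-contained sketch of calling `Exec` directly, reusing the one-field resolver from the README; it uses only the exported API above:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
)

type query struct{}

func (*query) Hello() string { return "Hello, world!" }

func main() {
	schema := graphql.MustParseSchema(`type Query { hello: String! }`, &query{})

	// Exec validates and then executes the query against the resolver.
	// It panics if the schema was parsed with a nil resolver.
	resp := schema.Exec(context.Background(), `{ hello }`, "", nil)

	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"data":{"hello":"Hello, world!"}}
}
```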
+func (s *Schema) Exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) *Response { + if !s.res.Resolver.IsValid() { + panic("schema created without resolver, can not exec") + } + return s.exec(ctx, queryString, operationName, variables, s.res) +} + +func (s *Schema) exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) *Response { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return &Response{Errors: []*errors.QueryError{qErr}} + } + + validationFinish := s.validationTracer.TraceValidation(ctx) + errs := validation.Validate(s.schema, doc, variables, s.maxDepth) + validationFinish(errs) + if len(errs) != 0 { + return &Response{Errors: errs} + } + + op, err := getOperation(doc, operationName) + if err != nil { + return &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}} + } + + // If the optional "operationName" POST parameter is not provided then + // use the query's operation name for improved tracing. + if operationName == "" { + operationName = op.Name.Name + } + + // Subscriptions are not valid in Exec. Use schema.Subscribe() instead. + if op.Type == query.Subscription { + return &Response{Errors: []*errors.QueryError{{Message: "graphql-ws protocol header is missing"}}} + } + if op.Type == query.Mutation { + if _, ok := s.schema.EntryPoints["mutation"]; !ok { + return &Response{Errors: []*errors.QueryError{{Message: "no mutations are offered by the schema"}}} + } + } + + // Fill in variables with the defaults from the operation + if variables == nil { + variables = make(map[string]interface{}, len(op.Vars)) + } + for _, v := range op.Vars { + if _, ok := variables[v.Name.Name]; !ok && v.Default != nil { + variables[v.Name.Name] = v.Default.Deserialize(nil) + } + } + + r := &exec.Request{ + Request: selected.Request{ + Doc: doc, + Vars: variables, + Schema: s.schema, + DisableIntrospection: s.disableIntrospection, + }, + Limiter: make(chan struct{}, s.maxParallelism), + Tracer: s.tracer, + Logger: s.logger, + PanicHandler: s.panicHandler, + } + varTypes := make(map[string]*introspection.Type) + for _, v := range op.Vars { + t, err := common.ResolveType(v.Type, s.schema.Resolve) + if err != nil { + return &Response{Errors: []*errors.QueryError{err}} + } + varTypes[v.Name.Name] = introspection.WrapType(t) + } + traceCtx, finish := s.tracer.TraceQuery(ctx, queryString, operationName, variables, varTypes) + data, errs := r.Execute(traceCtx, res, op) + finish(errs) + + return &Response{ + Data: data, + Errors: errs, + } +} + +func (s *Schema) validateSchema() error { + // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types + // > The query root operation type must be provided and must be an Object type. + if err := validateRootOp(s.schema, "query", true); err != nil { + return err + } + // > The mutation root operation type is optional; if it is not provided, the service does not support mutations. + // > If it is provided, it must be an Object type. + if err := validateRootOp(s.schema, "mutation", false); err != nil { + return err + } + // > Similarly, the subscription root operation type is also optional; if it is not provided, the service does not + // > support subscriptions. If it is provided, it must be an Object type. 
+ if err := validateRootOp(s.schema, "subscription", false); err != nil { + return err + } + return nil +} + +type validationBridgingTracer struct { + tracer trace.ValidationTracer //nolint:staticcheck +} + +func (t *validationBridgingTracer) TraceValidation(context.Context) trace.TraceValidationFinishFunc { + return t.tracer.TraceValidation() +} + +func validateRootOp(s *types.Schema, name string, mandatory bool) error { + t, ok := s.EntryPoints[name] + if !ok { + if mandatory { + return fmt.Errorf("root operation %q must be defined", name) + } + return nil + } + if t.Kind() != "OBJECT" { + return fmt.Errorf("root operation %q must be an OBJECT", name) + } + return nil +} + +func getOperation(document *types.ExecutableDefinition, operationName string) (*types.OperationDefinition, error) { + if len(document.Operations) == 0 { + return nil, fmt.Errorf("no operations in query document") + } + + if operationName == "" { + if len(document.Operations) > 1 { + return nil, fmt.Errorf("more than one operation in query document and no operation name given") + } + for _, op := range document.Operations { + return op, nil // return the one and only operation + } + } + + op := document.Operations.Get(operationName) + if op == nil { + return nil, fmt.Errorf("no operation with name %q", operationName) + } + return op, nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/id.go b/vendor/github.com/graph-gophers/graphql-go/id.go new file mode 100644 index 00000000..80bdac90 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/id.go @@ -0,0 +1,30 @@ +package graphql + +import ( + "fmt" + "strconv" +) + +// ID represents GraphQL's "ID" scalar type. A custom type may be used instead. +type ID string + +func (ID) ImplementsGraphQLType(name string) bool { + return name == "ID" +} + +func (id *ID) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + *id = ID(input) + case int32: + *id = ID(strconv.Itoa(int(input))) + default: + err = fmt.Errorf("wrong type for ID: %T", input) + } + return err +} + +func (id ID) MarshalJSON() ([]byte, error) { + return strconv.AppendQuote(nil, string(id)), nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go new file mode 100644 index 00000000..1f7fe813 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go @@ -0,0 +1,103 @@ +// MIT License +// +// Copyright (c) 2019 GraphQL Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// This implementation has been adapted from the graphql-js reference implementation +// https://github.com/graphql/graphql-js/blob/5eb7c4ded7ceb83ac742149cbe0dae07a8af9a30/src/language/blockString.js +// which is released under the MIT License above. + +package common + +import ( + "strings" +) + +// Produces the value of a block string from its parsed raw value, similar to +// CoffeeScript's block string, Python's docstring trim or Ruby's strip_heredoc. +// +// This implements the GraphQL spec's BlockStringValue() static algorithm. +func blockString(raw string) string { + lines := strings.Split(raw, "\n") + + // Remove common indentation from all lines except the first (which has none) + ind := blockStringIndentation(lines) + if ind > 0 { + for i := 1; i < len(lines); i++ { + l := lines[i] + if len(l) < ind { + lines[i] = "" + continue + } + lines[i] = l[ind:] + } + } + + // Remove leading and trailing blank lines + trimStart := 0 + for i := 0; i < len(lines) && isBlank(lines[i]); i++ { + trimStart++ + } + lines = lines[trimStart:] + trimEnd := 0 + for i := len(lines) - 1; i > 0 && isBlank(lines[i]); i-- { + trimEnd++ + } + lines = lines[:len(lines)-trimEnd] + + return strings.Join(lines, "\n") +} + +func blockStringIndentation(lines []string) int { + var commonIndent *int + for i := 1; i < len(lines); i++ { + l := lines[i] + indent := leadingWhitespace(l) + if indent == len(l) { + // don't consider blank/empty lines + continue + } + if indent == 0 { + return 0 + } + if commonIndent == nil || indent < *commonIndent { + commonIndent = &indent + } + } + if commonIndent == nil { + return 0 + } + return *commonIndent +} + +func isBlank(s string) bool { + return len(s) == 0 || leadingWhitespace(s) == len(s) +} + +func leadingWhitespace(s string) int { + i := 0 + for _, r := range s { + if r != '\t' && r != ' ' { + break + } + i++ + } + return i +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go new file mode 100644 index 00000000..f767e28f --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go @@ -0,0 +1,18 @@ +package common + +import "github.com/graph-gophers/graphql-go/types" + +func ParseDirectives(l *Lexer) types.DirectiveList { + var directives types.DirectiveList + for l.Peek() == '@' { + l.ConsumeToken('@') + d := &types.Directive{} + d.Name = l.ConsumeIdentWithLoc() + d.Name.Loc.Column-- + if l.Peek() == '(' { + d.Arguments = ParseArgumentList(l) + } + directives = append(directives, d) + } + return directives +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go new file mode 100644 index 00000000..ff45bcad --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go @@ -0,0 +1,229 @@ +package common + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +type syntaxError string + +type Lexer struct { + sc *scanner.Scanner + next rune + comment bytes.Buffer + useStringDescriptions bool +} + +type Ident struct { + Name string 
+ Loc errors.Location +} + +func NewLexer(s string, useStringDescriptions bool) *Lexer { + sc := &scanner.Scanner{ + Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings, + } + sc.Init(strings.NewReader(s)) + + l := Lexer{sc: sc, useStringDescriptions: useStringDescriptions} + l.sc.Error = l.CatchScannerError + + return &l +} + +func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) { + defer func() { + if err := recover(); err != nil { + if err, ok := err.(syntaxError); ok { + errRes = errors.Errorf("syntax error: %s", err) + errRes.Locations = []errors.Location{l.Location()} + return + } + panic(err) + } + }() + + f() + return +} + +func (l *Lexer) Peek() rune { + return l.next +} + +// ConsumeWhitespace consumes whitespace and tokens equivalent to whitespace (e.g. commas and comments). +// +// Consumed comment characters will build the description for the next type or field encountered. +// The description is available from `DescComment()`, and will be reset every time `ConsumeWhitespace()` is +// executed unless l.useStringDescriptions is set. +func (l *Lexer) ConsumeWhitespace() { + l.comment.Reset() + for { + l.next = l.sc.Scan() + + if l.next == ',' { + // Similar to white space and line terminators, commas (',') are used to improve the + // legibility of source text and separate lexical tokens but are otherwise syntactically and + // semantically insignificant within GraphQL documents. + // + // http://facebook.github.io/graphql/draft/#sec-Insignificant-Commas + continue + } + + if l.next == '#' { + // GraphQL source documents may contain single-line comments, starting with the '#' marker. + // + // A comment can contain any Unicode code point except `LineTerminator` so a comment always + // consists of all code points starting with the '#' character up to but not including the + // line terminator. + l.consumeComment() + continue + } + + break + } +} + +// consumeDescription optionally consumes a description based on the June 2018 graphql spec if any are present. +// +// Single quote strings are also single line. Triple quote strings can be multi-line. Triple quote strings +// whitespace trimmed on both ends. 
+// If a description is found, consume any following comments as well +// +// http://facebook.github.io/graphql/June2018/#sec-Descriptions +func (l *Lexer) consumeDescription() string { + // If the next token is not a string, we don't consume it + if l.next != scanner.String { + return "" + } + // Triple quote string is an empty "string" followed by an open quote due to the way the parser treats strings as one token + var desc string + if l.sc.Peek() == '"' { + desc = l.consumeTripleQuoteComment() + } else { + desc = l.consumeStringComment() + } + l.ConsumeWhitespace() + return desc +} + +func (l *Lexer) ConsumeIdent() string { + name := l.sc.TokenText() + l.ConsumeToken(scanner.Ident) + return name +} + +func (l *Lexer) ConsumeIdentWithLoc() types.Ident { + loc := l.Location() + name := l.sc.TokenText() + l.ConsumeToken(scanner.Ident) + return types.Ident{Name: name, Loc: loc} +} + +func (l *Lexer) ConsumeKeyword(keyword string) { + if l.next != scanner.Ident || l.sc.TokenText() != keyword { + l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword)) + } + l.ConsumeWhitespace() +} + +func (l *Lexer) ConsumeLiteral() *types.PrimitiveValue { + lit := &types.PrimitiveValue{Type: l.next, Text: l.sc.TokenText()} + l.ConsumeWhitespace() + return lit +} + +func (l *Lexer) ConsumeToken(expected rune) { + if l.next != expected { + l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected))) + } + l.ConsumeWhitespace() +} + +func (l *Lexer) DescComment() string { + comment := l.comment.String() + desc := l.consumeDescription() + if l.useStringDescriptions { + return desc + } + return comment +} + +func (l *Lexer) SyntaxError(message string) { + panic(syntaxError(message)) +} + +func (l *Lexer) Location() errors.Location { + return errors.Location{ + Line: l.sc.Line, + Column: l.sc.Column, + } +} + +func (l *Lexer) consumeTripleQuoteComment() string { + l.next = l.sc.Next() + if l.next != '"' { + panic("consumeTripleQuoteComment used in wrong context: no third quote?") + } + + var buf bytes.Buffer + var numQuotes int + for { + l.next = l.sc.Next() + if l.next == '"' { + numQuotes++ + } else { + numQuotes = 0 + } + buf.WriteRune(l.next) + if numQuotes == 3 || l.next == scanner.EOF { + break + } + } + val := buf.String() + val = val[:len(val)-numQuotes] + return blockString(val) +} + +func (l *Lexer) consumeStringComment() string { + val, err := strconv.Unquote(l.sc.TokenText()) + if err != nil { + panic(err) + } + return val +} + +// consumeComment consumes all characters from `#` to the first encountered line terminator. +// The characters are appended to `l.comment`. +func (l *Lexer) consumeComment() { + if l.next != '#' { + panic("consumeComment used in wrong context") + } + + // TODO: count and trim whitespace so we can dedent any following lines. 
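+	// (For now, at most one space immediately after '#' is dropped.)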
+ if l.sc.Peek() == ' ' { + l.sc.Next() + } + + if l.comment.Len() > 0 { + l.comment.WriteRune('\n') + } + + for { + next := l.sc.Next() + if next == '\r' || next == '\n' || next == scanner.EOF { + break + } + l.comment.WriteRune(next) + } +} + +func (l *Lexer) CatchScannerError(s *scanner.Scanner, msg string) { + l.SyntaxError(msg) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go new file mode 100644 index 00000000..a6af3c43 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go @@ -0,0 +1,58 @@ +package common + +import ( + "text/scanner" + + "github.com/graph-gophers/graphql-go/types" +) + +func ParseLiteral(l *Lexer, constOnly bool) types.Value { + loc := l.Location() + switch l.Peek() { + case '$': + if constOnly { + l.SyntaxError("variable not allowed") + panic("unreachable") + } + l.ConsumeToken('$') + return &types.Variable{Name: l.ConsumeIdent(), Loc: loc} + + case scanner.Int, scanner.Float, scanner.String, scanner.Ident: + lit := l.ConsumeLiteral() + if lit.Type == scanner.Ident && lit.Text == "null" { + return &types.NullValue{Loc: loc} + } + lit.Loc = loc + return lit + case '-': + l.ConsumeToken('-') + lit := l.ConsumeLiteral() + lit.Text = "-" + lit.Text + lit.Loc = loc + return lit + case '[': + l.ConsumeToken('[') + var list []types.Value + for l.Peek() != ']' { + list = append(list, ParseLiteral(l, constOnly)) + } + l.ConsumeToken(']') + return &types.ListValue{Values: list, Loc: loc} + + case '{': + l.ConsumeToken('{') + var fields []*types.ObjectField + for l.Peek() != '}' { + name := l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + value := ParseLiteral(l, constOnly) + fields = append(fields, &types.ObjectField{Name: name, Value: value}) + } + l.ConsumeToken('}') + return &types.ObjectValue{Fields: fields, Loc: loc} + + default: + l.SyntaxError("invalid value") + panic("unreachable") + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go new file mode 100644 index 00000000..4a30f46e --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go @@ -0,0 +1,67 @@ +package common + +import ( + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +func ParseType(l *Lexer) types.Type { + t := parseNullType(l) + if l.Peek() == '!' { + l.ConsumeToken('!') + return &types.NonNull{OfType: t} + } + return t +} + +func parseNullType(l *Lexer) types.Type { + if l.Peek() == '[' { + l.ConsumeToken('[') + ofType := ParseType(l) + l.ConsumeToken(']') + return &types.List{OfType: ofType} + } + + return &types.TypeName{Ident: l.ConsumeIdentWithLoc()} +} + +type Resolver func(name string) types.Type + +// ResolveType attempts to resolve a type's name against a resolving function. +// This function is used when one needs to check if a TypeName exists in the resolver (typically a Schema). +// +// In the example below, ResolveType would be used to check if the resolving function +// returns a valid type for Dimension: +// +// type Profile { +// picture(dimensions: Dimension): Url +// } +// +// ResolveType recursively unwraps List and NonNull types until a NamedType is reached. 
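Because `internal/common` is module-private, the following free-standing sketch mirrors the same unwrapping recursion with simplified stand-in types; `TypeName`, `List`, and `NonNull` here are illustrative, not the real `types` package:

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the schema type wrappers.
type Type interface{}

type TypeName string
type List struct{ OfType Type }
type NonNull struct{ OfType Type }

// resolveType mirrors ResolveType: unwrap List and NonNull recursively,
// then resolve the innermost named type through the lookup function.
func resolveType(t Type, lookup func(string) Type) (Type, error) {
	switch t := t.(type) {
	case List:
		of, err := resolveType(t.OfType, lookup)
		if err != nil {
			return nil, err
		}
		return List{OfType: of}, nil
	case NonNull:
		of, err := resolveType(t.OfType, lookup)
		if err != nil {
			return nil, err
		}
		return NonNull{OfType: of}, nil
	case TypeName:
		if r := lookup(string(t)); r != nil {
			return r, nil
		}
		return nil, errors.New("unknown type " + string(t))
	default:
		return t, nil
	}
}

func main() {
	lookup := func(name string) Type {
		if name == "Episode" {
			return TypeName("Episode")
		}
		return nil
	}

	// A schema type such as "[Episode!]!" corresponds to this nesting.
	resolved, err := resolveType(NonNull{OfType: List{OfType: NonNull{OfType: TypeName("Episode")}}}, lookup)
	fmt.Println(resolved, err)
}
```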
+func ResolveType(t types.Type, resolver Resolver) (types.Type, *errors.QueryError) { + switch t := t.(type) { + case *types.List: + ofType, err := ResolveType(t.OfType, resolver) + if err != nil { + return nil, err + } + return &types.List{OfType: ofType}, nil + case *types.NonNull: + ofType, err := ResolveType(t.OfType, resolver) + if err != nil { + return nil, err + } + return &types.NonNull{OfType: ofType}, nil + case *types.TypeName: + refT := resolver(t.Name) + if refT == nil { + err := errors.Errorf("Unknown type %q.", t.Name) + err.Rule = "KnownTypeNames" + err.Locations = []errors.Location{t.Loc} + return nil, err + } + return refT, nil + default: + return t, nil + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go new file mode 100644 index 00000000..2d6e0b54 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go @@ -0,0 +1,37 @@ +package common + +import ( + "github.com/graph-gophers/graphql-go/types" +) + +func ParseInputValue(l *Lexer) *types.InputValueDefinition { + p := &types.InputValueDefinition{} + p.Loc = l.Location() + p.Desc = l.DescComment() + p.Name = l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + p.TypeLoc = l.Location() + p.Type = ParseType(l) + if l.Peek() == '=' { + l.ConsumeToken('=') + p.Default = ParseLiteral(l, true) + } + p.Directives = ParseDirectives(l) + return p +} + +func ParseArgumentList(l *Lexer) types.ArgumentList { + var args types.ArgumentList + l.ConsumeToken('(') + for l.Peek() != ')' { + name := l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + value := ParseLiteral(l, false) + args = append(args, &types.Argument{ + Name: name, + Value: value, + }) + } + l.ConsumeToken(')') + return args +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go new file mode 100644 index 00000000..6b478487 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go @@ -0,0 +1,381 @@ +package exec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "sync" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/log" + "github.com/graph-gophers/graphql-go/trace" + "github.com/graph-gophers/graphql-go/types" +) + +type Request struct { + selected.Request + Limiter chan struct{} + Tracer trace.Tracer + Logger log.Logger + PanicHandler errors.PanicHandler + SubscribeResolverTimeout time.Duration +} + +func (r *Request) handlePanic(ctx context.Context) { + if value := recover(); value != nil { + r.Logger.LogPanic(ctx, value) + r.AddError(r.PanicHandler.MakePanicError(ctx, value)) + } +} + +type extensionser interface { + Extensions() map[string]interface{} +} + +func (r *Request) Execute(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) ([]byte, []*errors.QueryError) { + var out bytes.Buffer + func() { + defer r.handlePanic(ctx) + sels := selected.ApplyOperation(&r.Request, s, op) + r.execSelections(ctx, sels, nil, s, s.Resolver, &out, op.Type == query.Mutation) + }() + + if err := ctx.Err(); err != nil { + return nil, []*errors.QueryError{errors.Errorf("%s", err)} + } + + return out.Bytes(), r.Errs +} + +type fieldToExec struct { + field 
*selected.SchemaField + sels []selected.Selection + resolver reflect.Value + out *bytes.Buffer +} + +func resolvedToNull(b *bytes.Buffer) bool { + return bytes.Equal(b.Bytes(), []byte("null")) +} + +func (r *Request) execSelections(ctx context.Context, sels []selected.Selection, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer, serially bool) { + async := !serially && selected.HasAsyncSel(sels) + + var fields []*fieldToExec + collectFieldsToResolve(sels, s, resolver, &fields, make(map[string]*fieldToExec)) + + if async { + var wg sync.WaitGroup + wg.Add(len(fields)) + for _, f := range fields { + go func(f *fieldToExec) { + defer wg.Done() + defer r.handlePanic(ctx) + f.out = new(bytes.Buffer) + execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true) + }(f) + } + wg.Wait() + } else { + for _, f := range fields { + f.out = new(bytes.Buffer) + execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true) + } + } + + out.WriteByte('{') + for i, f := range fields { + // If a non-nullable child resolved to null, an error was added to the + // "errors" list in the response, so this field resolves to null. + // If this field is non-nullable, the error is propagated to its parent. + if _, ok := f.field.Type.(*types.NonNull); ok && resolvedToNull(f.out) { + out.Reset() + out.Write([]byte("null")) + return + } + + if i > 0 { + out.WriteByte(',') + } + out.WriteByte('"') + out.WriteString(f.field.Alias) + out.WriteByte('"') + out.WriteByte(':') + out.Write(f.out.Bytes()) + } + out.WriteByte('}') +} + +func collectFieldsToResolve(sels []selected.Selection, s *resolvable.Schema, resolver reflect.Value, fields *[]*fieldToExec, fieldByAlias map[string]*fieldToExec) { + for _, sel := range sels { + switch sel := sel.(type) { + case *selected.SchemaField: + field, ok := fieldByAlias[sel.Alias] + if !ok { // validation already checked for conflict (TODO) + field = &fieldToExec{field: sel, resolver: resolver} + fieldByAlias[sel.Alias] = field + *fields = append(*fields, field) + } + field.sels = append(field.sels, sel.Sels...) 
+ + case *selected.TypenameField: + _, ok := fieldByAlias[sel.Alias] + if !ok { + res := reflect.ValueOf(typeOf(sel, resolver)) + f := s.FieldTypename + f.TypeName = res.String() + + sf := &selected.SchemaField{ + Field: f, + Alias: sel.Alias, + FixedResult: res, + } + + field := &fieldToExec{field: sf, resolver: resolver} + *fields = append(*fields, field) + fieldByAlias[sel.Alias] = field + } + + case *selected.TypeAssertion: + out := resolver.Method(sel.MethodIndex).Call(nil) + if !out[1].Bool() { + continue + } + collectFieldsToResolve(sel.Sels, s, out[0], fields, fieldByAlias) + + default: + panic("unreachable") + } + } +} + +func typeOf(tf *selected.TypenameField, resolver reflect.Value) string { + if len(tf.TypeAssertions) == 0 { + return tf.Name + } + for name, a := range tf.TypeAssertions { + out := resolver.Method(a.MethodIndex).Call(nil) + if out[1].Bool() { + return name + } + } + return "" +} + +func execFieldSelection(ctx context.Context, r *Request, s *resolvable.Schema, f *fieldToExec, path *pathSegment, applyLimiter bool) { + if applyLimiter { + r.Limiter <- struct{}{} + } + + var result reflect.Value + var err *errors.QueryError + + traceCtx, finish := r.Tracer.TraceField(ctx, f.field.TraceLabel, f.field.TypeName, f.field.Name, !f.field.Async, f.field.Args) + defer func() { + finish(err) + }() + + err = func() (err *errors.QueryError) { + defer func() { + if panicValue := recover(); panicValue != nil { + r.Logger.LogPanic(ctx, panicValue) + err = r.PanicHandler.MakePanicError(ctx, panicValue) + err.Path = path.toSlice() + } + }() + + if f.field.FixedResult.IsValid() { + result = f.field.FixedResult + return nil + } + + if err := traceCtx.Err(); err != nil { + return errors.Errorf("%s", err) // don't execute any more resolvers if context got cancelled + } + + res := f.resolver + if f.field.UseMethodResolver() { + var in []reflect.Value + if f.field.HasContext { + in = append(in, reflect.ValueOf(traceCtx)) + } + if f.field.ArgsPacker != nil { + in = append(in, f.field.PackedArgs) + } + callOut := res.Method(f.field.MethodIndex).Call(in) + result = callOut[0] + if f.field.HasError && !callOut[1].IsNil() { + resolverErr := callOut[1].Interface().(error) + err := errors.Errorf("%s", resolverErr) + err.Path = path.toSlice() + err.ResolverError = resolverErr + if ex, ok := callOut[1].Interface().(extensionser); ok { + err.Extensions = ex.Extensions() + } + return err + } + } else { + // TODO extract out unwrapping ptr logic to a common place + if res.Kind() == reflect.Ptr { + res = res.Elem() + } + result = res.FieldByIndex(f.field.FieldIndex) + } + return nil + }() + + if applyLimiter { + <-r.Limiter + } + + if err != nil { + // If an error occurred while resolving a field, it should be treated as though the field + // returned null, and an error must be added to the "errors" list in the response. 
+ r.AddError(err) + f.out.WriteString("null") + return + } + + r.execSelectionSet(traceCtx, f.sels, f.field.Type, path, s, result, f.out) +} + +func (r *Request) execSelectionSet(ctx context.Context, sels []selected.Selection, typ types.Type, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) { + t, nonNull := unwrapNonNull(typ) + + // a reflect.Value of a nil interface will show up as an Invalid value + if resolver.Kind() == reflect.Invalid || ((resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface) && resolver.IsNil()) { + // If a field of a non-null type resolves to null (either because the + // function to resolve the field returned null or because an error occurred), + // add an error to the "errors" list in the response. + if nonNull { + err := errors.Errorf("graphql: got nil for non-null %q", t) + err.Path = path.toSlice() + r.AddError(err) + } + out.WriteString("null") + return + } + + switch t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + r.execSelections(ctx, sels, path, s, resolver, out, false) + return + } + + // Any pointers or interfaces at this point should be non-nil, so we can get the actual value of them + // for serialization + if resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface { + resolver = resolver.Elem() + } + + switch t := t.(type) { + case *types.List: + r.execList(ctx, sels, t, path, s, resolver, out) + + case *types.ScalarTypeDefinition: + v := resolver.Interface() + data, err := json.Marshal(v) + if err != nil { + panic(errors.Errorf("could not marshal %v: %s", v, err)) + } + out.Write(data) + + case *types.EnumTypeDefinition: + var stringer fmt.Stringer = resolver + if s, ok := resolver.Interface().(fmt.Stringer); ok { + stringer = s + } + name := stringer.String() + var valid bool + for _, v := range t.EnumValuesDefinition { + if v.EnumValue == name { + valid = true + break + } + } + if !valid { + err := errors.Errorf("Invalid value %s.\nExpected type %s, found %s.", name, t.Name, name) + err.Path = path.toSlice() + r.AddError(err) + out.WriteString("null") + return + } + out.WriteByte('"') + out.WriteString(name) + out.WriteByte('"') + + default: + panic("unreachable") + } +} + +func (r *Request) execList(ctx context.Context, sels []selected.Selection, typ *types.List, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) { + l := resolver.Len() + entryouts := make([]bytes.Buffer, l) + + if selected.HasAsyncSel(sels) { + // Limit the number of concurrent goroutines spawned as it can lead to large + // memory spikes for large lists. + concurrency := cap(r.Limiter) + sem := make(chan struct{}, concurrency) + for i := 0; i < l; i++ { + sem <- struct{}{} + go func(i int) { + defer func() { <-sem }() + defer r.handlePanic(ctx) + r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i]) + }(i) + } + for i := 0; i < concurrency; i++ { + sem <- struct{}{} + } + } else { + for i := 0; i < l; i++ { + r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i]) + } + } + + _, listOfNonNull := typ.OfType.(*types.NonNull) + + out.WriteByte('[') + for i, entryout := range entryouts { + // If the list wraps a non-null type and one of the list elements + // resolves to null, then the entire list resolves to null. 
+ if listOfNonNull && resolvedToNull(&entryout) { + out.Reset() + out.WriteString("null") + return + } + + if i > 0 { + out.WriteByte(',') + } + out.Write(entryout.Bytes()) + } + out.WriteByte(']') +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +type pathSegment struct { + parent *pathSegment + value interface{} +} + +func (p *pathSegment) toSlice() []interface{} { + if p == nil { + return nil + } + return append(p.parent.toSlice(), p.value) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go new file mode 100644 index 00000000..c0bb7dc9 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go @@ -0,0 +1,390 @@ +package packer + +import ( + "fmt" + "math" + "reflect" + "strings" + + "github.com/graph-gophers/graphql-go/decode" + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +type packer interface { + Pack(value interface{}) (reflect.Value, error) +} + +type Builder struct { + packerMap map[typePair]*packerMapEntry + structPackers []*StructPacker +} + +type typePair struct { + graphQLType types.Type + resolverType reflect.Type +} + +type packerMapEntry struct { + packer packer + targets []*packer +} + +func NewBuilder() *Builder { + return &Builder{ + packerMap: make(map[typePair]*packerMapEntry), + } +} + +func (b *Builder) Finish() error { + for _, entry := range b.packerMap { + for _, target := range entry.targets { + *target = entry.packer + } + } + + for _, p := range b.structPackers { + p.defaultStruct = reflect.New(p.structType).Elem() + for _, f := range p.fields { + if defaultVal := f.field.Default; defaultVal != nil { + v, err := f.fieldPacker.Pack(defaultVal.Deserialize(nil)) + if err != nil { + return err + } + p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v) + } + } + } + + return nil +} + +func (b *Builder) assignPacker(target *packer, schemaType types.Type, reflectType reflect.Type) error { + k := typePair{schemaType, reflectType} + ref, ok := b.packerMap[k] + if !ok { + ref = &packerMapEntry{} + b.packerMap[k] = ref + var err error + ref.packer, err = b.makePacker(schemaType, reflectType) + if err != nil { + return err + } + } + ref.targets = append(ref.targets, target) + return nil +} + +func (b *Builder) makePacker(schemaType types.Type, reflectType reflect.Type) (packer, error) { + t, nonNull := unwrapNonNull(schemaType) + if !nonNull { + if reflectType.Kind() == reflect.Ptr { + elemType := reflectType.Elem() + addPtr := true + if _, ok := t.(*types.InputObject); ok { + elemType = reflectType // keep pointer for input objects + addPtr = false + } + elem, err := b.makeNonNullPacker(t, elemType) + if err != nil { + return nil, err + } + return &nullPacker{ + elemPacker: elem, + valueType: reflectType, + addPtr: addPtr, + }, nil + } else if isNullable(reflectType) { + elemType := reflectType + addPtr := false + elem, err := b.makeNonNullPacker(t, elemType) + if err != nil { + return nil, err + } + return &nullPacker{ + elemPacker: elem, + valueType: reflectType, + addPtr: addPtr, + }, nil + } else { + return nil, fmt.Errorf("%s is not a pointer or a nullable type", reflectType) + } + } + + return b.makeNonNullPacker(t, reflectType) +} + +func (b *Builder) makeNonNullPacker(schemaType types.Type, reflectType reflect.Type) (packer, error) { + if u, ok := 
reflect.New(reflectType).Interface().(decode.Unmarshaler); ok { + if !u.ImplementsGraphQLType(schemaType.String()) { + return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType) + } + return &unmarshalerPacker{ + ValueType: reflectType, + }, nil + } + + switch t := schemaType.(type) { + case *types.ScalarTypeDefinition: + return &ValuePacker{ + ValueType: reflectType, + }, nil + + case *types.EnumTypeDefinition: + if reflectType.Kind() != reflect.String { + return nil, fmt.Errorf("wrong type, expected %s", reflect.String) + } + return &ValuePacker{ + ValueType: reflectType, + }, nil + + case *types.InputObject: + e, err := b.MakeStructPacker(t.Values, reflectType) + if err != nil { + return nil, err + } + return e, nil + + case *types.List: + if reflectType.Kind() != reflect.Slice { + return nil, fmt.Errorf("expected slice, got %s", reflectType) + } + p := &listPacker{ + sliceType: reflectType, + } + if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil { + return nil, err + } + return p, nil + + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind()) + + default: + panic("unreachable") + } +} + +func (b *Builder) MakeStructPacker(values []*types.InputValueDefinition, typ reflect.Type) (*StructPacker, error) { + structType := typ + usePtr := false + if typ.Kind() == reflect.Ptr { + structType = typ.Elem() + usePtr = true + } + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct or pointer to struct, got %s (hint: missing `args struct { ... }` wrapper for field arguments?)", typ) + } + + var fields []*structPackerField + for _, v := range values { + fe := &structPackerField{field: v} + fx := func(n string) bool { + return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name)) + } + + sf, ok := structType.FieldByNameFunc(fx) + if !ok { + return nil, fmt.Errorf("%s does not define field %q (hint: missing `args struct { ... 
}` wrapper for field arguments, or missing field on input struct)", typ, v.Name.Name) + } + if sf.PkgPath != "" { + return nil, fmt.Errorf("field %q must be exported", sf.Name) + } + fe.fieldIndex = sf.Index + + ft := v.Type + if v.Default != nil { + ft, _ = unwrapNonNull(ft) + ft = &types.NonNull{OfType: ft} + } + + if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil { + return nil, fmt.Errorf("field %q: %s", sf.Name, err) + } + + fields = append(fields, fe) + } + + p := &StructPacker{ + structType: structType, + usePtr: usePtr, + fields: fields, + } + b.structPackers = append(b.structPackers, p) + return p, nil +} + +type StructPacker struct { + structType reflect.Type + usePtr bool + defaultStruct reflect.Value + fields []*structPackerField +} + +type structPackerField struct { + field *types.InputValueDefinition + fieldIndex []int + fieldPacker packer +} + +func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + values := value.(map[string]interface{}) + v := reflect.New(p.structType) + v.Elem().Set(p.defaultStruct) + for _, f := range p.fields { + if value, ok := values[f.field.Name.Name]; ok { + packed, err := f.fieldPacker.Pack(value) + if err != nil { + return reflect.Value{}, err + } + v.Elem().FieldByIndex(f.fieldIndex).Set(packed) + } + } + if !p.usePtr { + return v.Elem(), nil + } + return v, nil +} + +type listPacker struct { + sliceType reflect.Type + elem packer +} + +func (e *listPacker) Pack(value interface{}) (reflect.Value, error) { + list, ok := value.([]interface{}) + if !ok { + list = []interface{}{value} + } + + v := reflect.MakeSlice(e.sliceType, len(list), len(list)) + for i := range list { + packed, err := e.elem.Pack(list[i]) + if err != nil { + return reflect.Value{}, err + } + v.Index(i).Set(packed) + } + return v, nil +} + +type nullPacker struct { + elemPacker packer + valueType reflect.Type + addPtr bool +} + +func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil && !isNullable(p.valueType) { + return reflect.Zero(p.valueType), nil + } + + v, err := p.elemPacker.Pack(value) + if err != nil { + return reflect.Value{}, err + } + + if p.addPtr { + ptr := reflect.New(p.valueType.Elem()) + ptr.Elem().Set(v) + return ptr, nil + } + + return v, nil +} + +type ValuePacker struct { + ValueType reflect.Type +} + +func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + coerced, err := unmarshalInput(p.ValueType, value) + if err != nil { + return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err) + } + return reflect.ValueOf(coerced), nil +} + +type unmarshalerPacker struct { + ValueType reflect.Type +} + +func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil && !isNullable(p.ValueType) { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + v := reflect.New(p.ValueType) + if err := v.Interface().(decode.Unmarshaler).UnmarshalGraphQL(value); err != nil { + return reflect.Value{}, err + } + return v.Elem(), nil +} + +func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) { + if reflect.TypeOf(input) == typ { + return input, nil + } + + switch typ.Kind() { + case reflect.Int32: + switch input := input.(type) { + case int: + if input < math.MinInt32 || input > math.MaxInt32 { + 
return nil, fmt.Errorf("not a 32-bit integer") + } + return int32(input), nil + case float64: + coerced := int32(input) + if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input { + return nil, fmt.Errorf("not a 32-bit integer") + } + return coerced, nil + } + + case reflect.Float64: + switch input := input.(type) { + case int32: + return float64(input), nil + case int: + return float64(input), nil + } + + case reflect.String: + if reflect.TypeOf(input).ConvertibleTo(typ) { + return reflect.ValueOf(input).Convert(typ).Interface(), nil + } + } + + return nil, fmt.Errorf("incompatible type") +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +func stripUnderscore(s string) string { + return strings.Replace(s, "_", "", -1) +} + +// NullUnmarshaller is an unmarshaller that can handle a nil input +type NullUnmarshaller interface { + decode.Unmarshaler + Nullable() +} + +func isNullable(t reflect.Type) bool { + _, ok := reflect.New(t).Interface().(NullUnmarshaller) + return ok +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go new file mode 100644 index 00000000..02d5e262 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go @@ -0,0 +1,70 @@ +package resolvable + +import ( + "reflect" + + "github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/types" +) + +// Meta defines the details of the metadata schema for introspection. +type Meta struct { + FieldSchema Field + FieldType Field + FieldTypename Field + Schema *Object + Type *Object +} + +func newMeta(s *types.Schema) *Meta { + var err error + b := newBuilder(s) + + metaSchema := s.Types["__Schema"].(*types.ObjectTypeDefinition) + so, err := b.makeObjectExec(metaSchema.Name, metaSchema.Fields, nil, false, reflect.TypeOf(&introspection.Schema{})) + if err != nil { + panic(err) + } + + metaType := s.Types["__Type"].(*types.ObjectTypeDefinition) + t, err := b.makeObjectExec(metaType.Name, metaType.Fields, nil, false, reflect.TypeOf(&introspection.Type{})) + if err != nil { + panic(err) + } + + if err := b.finish(); err != nil { + panic(err) + } + + fieldTypename := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__typename", + Type: &types.NonNull{OfType: s.Types["String"]}, + }, + TraceLabel: "GraphQL field: __typename", + } + + fieldSchema := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__schema", + Type: s.Types["__Schema"], + }, + TraceLabel: "GraphQL field: __schema", + } + + fieldType := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__type", + Type: s.Types["__Type"], + }, + TraceLabel: "GraphQL field: __type", + } + + return &Meta{ + FieldSchema: fieldSchema, + FieldTypename: fieldTypename, + FieldType: fieldType, + Schema: so, + Type: t, + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go new file mode 100644 index 00000000..3410f557 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go @@ -0,0 +1,453 @@ +package resolvable + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/graph-gophers/graphql-go/decode" + "github.com/graph-gophers/graphql-go/internal/exec/packer" + 
"github.com/graph-gophers/graphql-go/types" +) + +type Schema struct { + *Meta + types.Schema + Query Resolvable + Mutation Resolvable + Subscription Resolvable + Resolver reflect.Value +} + +type Resolvable interface { + isResolvable() +} + +type Object struct { + Name string + Fields map[string]*Field + TypeAssertions map[string]*TypeAssertion +} + +type Field struct { + types.FieldDefinition + TypeName string + MethodIndex int + FieldIndex []int + HasContext bool + HasError bool + ArgsPacker *packer.StructPacker + ValueExec Resolvable + TraceLabel string +} + +func (f *Field) UseMethodResolver() bool { + return len(f.FieldIndex) == 0 +} + +type TypeAssertion struct { + MethodIndex int + TypeExec Resolvable +} + +type List struct { + Elem Resolvable +} + +type Scalar struct{} + +func (*Object) isResolvable() {} +func (*List) isResolvable() {} +func (*Scalar) isResolvable() {} + +func ApplyResolver(s *types.Schema, resolver interface{}) (*Schema, error) { + if resolver == nil { + return &Schema{Meta: newMeta(s), Schema: *s}, nil + } + + b := newBuilder(s) + + var query, mutation, subscription Resolvable + + if t, ok := s.EntryPoints["query"]; ok { + if err := b.assignExec(&query, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if t, ok := s.EntryPoints["mutation"]; ok { + if err := b.assignExec(&mutation, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if t, ok := s.EntryPoints["subscription"]; ok { + if err := b.assignExec(&subscription, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if err := b.finish(); err != nil { + return nil, err + } + + return &Schema{ + Meta: newMeta(s), + Schema: *s, + Resolver: reflect.ValueOf(resolver), + Query: query, + Mutation: mutation, + Subscription: subscription, + }, nil +} + +type execBuilder struct { + schema *types.Schema + resMap map[typePair]*resMapEntry + packerBuilder *packer.Builder +} + +type typePair struct { + graphQLType types.Type + resolverType reflect.Type +} + +type resMapEntry struct { + exec Resolvable + targets []*Resolvable +} + +func newBuilder(s *types.Schema) *execBuilder { + return &execBuilder{ + schema: s, + resMap: make(map[typePair]*resMapEntry), + packerBuilder: packer.NewBuilder(), + } +} + +func (b *execBuilder) finish() error { + for _, entry := range b.resMap { + for _, target := range entry.targets { + *target = entry.exec + } + } + + return b.packerBuilder.Finish() +} + +func (b *execBuilder) assignExec(target *Resolvable, t types.Type, resolverType reflect.Type) error { + k := typePair{t, resolverType} + ref, ok := b.resMap[k] + if !ok { + ref = &resMapEntry{} + b.resMap[k] = ref + var err error + ref.exec, err = b.makeExec(t, resolverType) + if err != nil { + return err + } + } + ref.targets = append(ref.targets, target) + return nil +} + +func (b *execBuilder) makeExec(t types.Type, resolverType reflect.Type) (Resolvable, error) { + var nonNull bool + t, nonNull = unwrapNonNull(t) + + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return b.makeObjectExec(t.Name, t.Fields, nil, nonNull, resolverType) + + case *types.InterfaceTypeDefinition: + return b.makeObjectExec(t.Name, t.Fields, t.PossibleTypes, nonNull, resolverType) + + case *types.Union: + return b.makeObjectExec(t.Name, nil, t.UnionMemberTypes, nonNull, resolverType) + } + + if !nonNull { + if resolverType.Kind() != reflect.Ptr { + return nil, fmt.Errorf("%s is not a pointer", resolverType) + } + resolverType = resolverType.Elem() + } + + switch t := t.(type) { + case 
*types.ScalarTypeDefinition: + return makeScalarExec(t, resolverType) + + case *types.EnumTypeDefinition: + return &Scalar{}, nil + + case *types.List: + if resolverType.Kind() != reflect.Slice { + return nil, fmt.Errorf("%s is not a slice", resolverType) + } + e := &List{} + if err := b.assignExec(&e.Elem, t.OfType, resolverType.Elem()); err != nil { + return nil, err + } + return e, nil + + default: + panic("invalid type: " + t.String()) + } +} + +func makeScalarExec(t *types.ScalarTypeDefinition, resolverType reflect.Type) (Resolvable, error) { + implementsType := false + switch r := reflect.New(resolverType).Interface().(type) { + case *int32: + implementsType = t.Name == "Int" + case *float64: + implementsType = t.Name == "Float" + case *string: + implementsType = t.Name == "String" + case *bool: + implementsType = t.Name == "Boolean" + case decode.Unmarshaler: + implementsType = r.ImplementsGraphQLType(t.Name) + } + + if !implementsType { + return nil, fmt.Errorf("can not use %s as %s", resolverType, t.Name) + } + return &Scalar{}, nil +} + +func (b *execBuilder) makeObjectExec(typeName string, fields types.FieldsDefinition, possibleTypes []*types.ObjectTypeDefinition, + nonNull bool, resolverType reflect.Type) (*Object, error) { + if !nonNull { + if resolverType.Kind() != reflect.Ptr && resolverType.Kind() != reflect.Interface { + return nil, fmt.Errorf("%s is not a pointer or interface", resolverType) + } + } + + methodHasReceiver := resolverType.Kind() != reflect.Interface + + Fields := make(map[string]*Field) + rt := unwrapPtr(resolverType) + fieldsCount := fieldCount(rt, map[string]int{}) + for _, f := range fields { + var fieldIndex []int + methodIndex := findMethod(resolverType, f.Name) + if b.schema.UseFieldResolvers && methodIndex == -1 { + if fieldsCount[strings.ToLower(stripUnderscore(f.Name))] > 1 { + return nil, fmt.Errorf("%s does not resolve %q: ambiguous field %q", resolverType, typeName, f.Name) + } + fieldIndex = findField(rt, f.Name, []int{}) + } + if methodIndex == -1 && len(fieldIndex) == 0 { + hint := "" + if findMethod(reflect.PtrTo(resolverType), f.Name) != -1 { + hint = " (hint: the method exists on the pointer type)" + } + return nil, fmt.Errorf("%s does not resolve %q: missing method for field %q%s", resolverType, typeName, f.Name, hint) + } + + var m reflect.Method + var sf reflect.StructField + if methodIndex != -1 { + m = resolverType.Method(methodIndex) + } else { + sf = rt.FieldByIndex(fieldIndex) + } + fe, err := b.makeFieldExec(typeName, f, m, sf, methodIndex, fieldIndex, methodHasReceiver) + if err != nil { + var resolverName string + if methodIndex != -1 { + resolverName = m.Name + } else { + resolverName = sf.Name + } + return nil, fmt.Errorf("%s\n\tused by (%s).%s", err, resolverType, resolverName) + } + Fields[f.Name] = fe + } + + // Check type assertions when + // 1) using method resolvers + // 2) Or resolver is not an interface type + typeAssertions := make(map[string]*TypeAssertion) + if !b.schema.UseFieldResolvers || resolverType.Kind() != reflect.Interface { + for _, impl := range possibleTypes { + methodIndex := findMethod(resolverType, "To"+impl.Name) + if methodIndex == -1 { + return nil, fmt.Errorf("%s does not resolve %q: missing method %q to convert to %q", resolverType, typeName, "To"+impl.Name, impl.Name) + } + if resolverType.Method(methodIndex).Type.NumOut() != 2 { + return nil, fmt.Errorf("%s does not resolve %q: method %q should return a value and a bool indicating success", resolverType, typeName, "To"+impl.Name) + } + a := 
&TypeAssertion{ + MethodIndex: methodIndex, + } + if err := b.assignExec(&a.TypeExec, impl, resolverType.Method(methodIndex).Type.Out(0)); err != nil { + return nil, err + } + typeAssertions[impl.Name] = a + } + } + + return &Object{ + Name: typeName, + Fields: Fields, + TypeAssertions: typeAssertions, + }, nil +} + +var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +func (b *execBuilder) makeFieldExec(typeName string, f *types.FieldDefinition, m reflect.Method, sf reflect.StructField, + methodIndex int, fieldIndex []int, methodHasReceiver bool) (*Field, error) { + + var argsPacker *packer.StructPacker + var hasError bool + var hasContext bool + + // Validate resolver method only when there is one + if methodIndex != -1 { + in := make([]reflect.Type, m.Type.NumIn()) + for i := range in { + in[i] = m.Type.In(i) + } + if methodHasReceiver { + in = in[1:] // first parameter is receiver + } + + hasContext = len(in) > 0 && in[0] == contextType + if hasContext { + in = in[1:] + } + + if len(f.Arguments) > 0 { + if len(in) == 0 { + return nil, fmt.Errorf("must have parameter for field arguments") + } + var err error + argsPacker, err = b.packerBuilder.MakeStructPacker(f.Arguments, in[0]) + if err != nil { + return nil, err + } + in = in[1:] + } + + if len(in) > 0 { + return nil, fmt.Errorf("too many parameters") + } + + maxNumOfReturns := 2 + if m.Type.NumOut() < maxNumOfReturns-1 { + return nil, fmt.Errorf("too few return values") + } + + if m.Type.NumOut() > maxNumOfReturns { + return nil, fmt.Errorf("too many return values") + } + + hasError = m.Type.NumOut() == maxNumOfReturns + if hasError { + if m.Type.Out(maxNumOfReturns-1) != errorType { + return nil, fmt.Errorf(`must have "error" as its last return value`) + } + } + } + + fe := &Field{ + FieldDefinition: *f, + TypeName: typeName, + MethodIndex: methodIndex, + FieldIndex: fieldIndex, + HasContext: hasContext, + ArgsPacker: argsPacker, + HasError: hasError, + TraceLabel: fmt.Sprintf("GraphQL field: %s.%s", typeName, f.Name), + } + + var out reflect.Type + if methodIndex != -1 { + out = m.Type.Out(0) + sub, ok := b.schema.EntryPoints["subscription"] + if ok && typeName == sub.TypeName() && out.Kind() == reflect.Chan { + out = m.Type.Out(0).Elem() + } + } else { + out = sf.Type + } + if err := b.assignExec(&fe.ValueExec, f.Type, out); err != nil { + return nil, err + } + + return fe, nil +} + +func findMethod(t reflect.Type, name string) int { + for i := 0; i < t.NumMethod(); i++ { + if strings.EqualFold(stripUnderscore(name), stripUnderscore(t.Method(i).Name)) { + return i + } + } + return -1 +} + +func findField(t reflect.Type, name string, index []int) []int { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if field.Type.Kind() == reflect.Struct && field.Anonymous { + newIndex := findField(field.Type, name, []int{i}) + if len(newIndex) > 1 { + return append(index, newIndex...) + } + } + + if strings.EqualFold(stripUnderscore(name), stripUnderscore(field.Name)) { + return append(index, i) + } + } + + return index +} + +// fieldCount helps resolve ambiguity when more than one embedded struct contains fields with the same name. 
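+// Field names are compared case-insensitively with underscores stripped,
+// matching the lookup performed by findField above.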
+func fieldCount(t reflect.Type, count map[string]int) map[string]int { + if t.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + fieldName := strings.ToLower(stripUnderscore(field.Name)) + + if field.Type.Kind() == reflect.Struct && field.Anonymous { + count = fieldCount(field.Type, count) + } else { + if _, ok := count[fieldName]; !ok { + count[fieldName] = 0 + } + count[fieldName]++ + } + } + + return count +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +func stripUnderscore(s string) string { + return strings.Replace(s, "_", "", -1) +} + +func unwrapPtr(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return t.Elem() + } + return t +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go new file mode 100644 index 00000000..9b96d2b6 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go @@ -0,0 +1,269 @@ +package selected + +import ( + "fmt" + "reflect" + "sync" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/packer" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/types" +) + +type Request struct { + Schema *types.Schema + Doc *types.ExecutableDefinition + Vars map[string]interface{} + Mu sync.Mutex + Errs []*errors.QueryError + DisableIntrospection bool +} + +func (r *Request) AddError(err *errors.QueryError) { + r.Mu.Lock() + r.Errs = append(r.Errs, err) + r.Mu.Unlock() +} + +func ApplyOperation(r *Request, s *resolvable.Schema, op *types.OperationDefinition) []Selection { + var obj *resolvable.Object + switch op.Type { + case query.Query: + obj = s.Query.(*resolvable.Object) + case query.Mutation: + obj = s.Mutation.(*resolvable.Object) + case query.Subscription: + obj = s.Subscription.(*resolvable.Object) + } + return applySelectionSet(r, s, obj, op.Selections) +} + +type Selection interface { + isSelection() +} + +type SchemaField struct { + resolvable.Field + Alias string + Args map[string]interface{} + PackedArgs reflect.Value + Sels []Selection + Async bool + FixedResult reflect.Value +} + +type TypeAssertion struct { + resolvable.TypeAssertion + Sels []Selection +} + +type TypenameField struct { + resolvable.Object + Alias string +} + +func (*SchemaField) isSelection() {} +func (*TypeAssertion) isSelection() {} +func (*TypenameField) isSelection() {} + +func applySelectionSet(r *Request, s *resolvable.Schema, e *resolvable.Object, sels []types.Selection) (flattenedSels []Selection) { + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + field := sel + if skipByDirective(r, field.Directives) { + continue + } + + switch field.Name.Name { + case "__typename": + // __typename is available even though r.DisableIntrospection == true + // because it is necessary when using union types and interfaces: https://graphql.org/learn/schema/#union-types + flattenedSels = append(flattenedSels, &TypenameField{ + Object: *e, + Alias: field.Alias.Name, + }) + + case "__schema": + if !r.DisableIntrospection { + flattenedSels = append(flattenedSels, &SchemaField{ + Field: s.Meta.FieldSchema, + Alias: field.Alias.Name, + Sels: 
applySelectionSet(r, s, s.Meta.Schema, field.SelectionSet), + Async: true, + FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)), + }) + } + + case "__type": + if !r.DisableIntrospection { + p := packer.ValuePacker{ValueType: reflect.TypeOf("")} + v, err := p.Pack(field.Arguments.MustGet("name").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + return nil + } + + t, ok := r.Schema.Types[v.String()] + if !ok { + return nil + } + + flattenedSels = append(flattenedSels, &SchemaField{ + Field: s.Meta.FieldType, + Alias: field.Alias.Name, + Sels: applySelectionSet(r, s, s.Meta.Type, field.SelectionSet), + Async: true, + FixedResult: reflect.ValueOf(introspection.WrapType(t)), + }) + } + + default: + fe := e.Fields[field.Name.Name] + + var args map[string]interface{} + var packedArgs reflect.Value + if fe.ArgsPacker != nil { + args = make(map[string]interface{}) + for _, arg := range field.Arguments { + args[arg.Name.Name] = arg.Value.Deserialize(r.Vars) + } + var err error + packedArgs, err = fe.ArgsPacker.Pack(args) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + return + } + } + + fieldSels := applyField(r, s, fe.ValueExec, field.SelectionSet) + flattenedSels = append(flattenedSels, &SchemaField{ + Field: *fe, + Alias: field.Alias.Name, + Args: args, + PackedArgs: packedArgs, + Sels: fieldSels, + Async: fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels), + }) + } + + case *types.InlineFragment: + frag := sel + if skipByDirective(r, frag.Directives) { + continue + } + flattenedSels = append(flattenedSels, applyFragment(r, s, e, &frag.Fragment)...) + + case *types.FragmentSpread: + spread := sel + if skipByDirective(r, spread.Directives) { + continue + } + flattenedSels = append(flattenedSels, applyFragment(r, s, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...) 
+ + default: + panic("invalid type") + } + } + return +} + +func applyFragment(r *Request, s *resolvable.Schema, e *resolvable.Object, frag *types.Fragment) []Selection { + if frag.On.Name != e.Name { + t := r.Schema.Resolve(frag.On.Name) + face, ok := t.(*types.InterfaceTypeDefinition) + if !ok && frag.On.Name != "" { + a, ok2 := e.TypeAssertions[frag.On.Name] + if !ok2 { + panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling + } + + return []Selection{&TypeAssertion{ + TypeAssertion: *a, + Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections), + }} + } + if ok && len(face.PossibleTypes) > 0 { + sels := []Selection{} + for _, t := range face.PossibleTypes { + if t.Name == e.Name { + return applySelectionSet(r, s, e, frag.Selections) + } + + if a, ok := e.TypeAssertions[t.Name]; ok { + sels = append(sels, &TypeAssertion{ + TypeAssertion: *a, + Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections), + }) + } + } + if len(sels) == 0 { + panic(fmt.Errorf("%q does not implement %q", e.Name, frag.On)) // TODO proper error handling + } + return sels + } + } + return applySelectionSet(r, s, e, frag.Selections) +} + +func applyField(r *Request, s *resolvable.Schema, e resolvable.Resolvable, sels []types.Selection) []Selection { + switch e := e.(type) { + case *resolvable.Object: + return applySelectionSet(r, s, e, sels) + case *resolvable.List: + return applyField(r, s, e.Elem, sels) + case *resolvable.Scalar: + return nil + default: + panic("unreachable") + } +} + +func skipByDirective(r *Request, directives types.DirectiveList) bool { + if d := directives.Get("skip"); d != nil { + p := packer.ValuePacker{ValueType: reflect.TypeOf(false)} + v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + } + if err == nil && v.Bool() { + return true + } + } + + if d := directives.Get("include"); d != nil { + p := packer.ValuePacker{ValueType: reflect.TypeOf(false)} + v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + } + if err == nil && !v.Bool() { + return true + } + } + + return false +} + +func HasAsyncSel(sels []Selection) bool { + for _, sel := range sels { + switch sel := sel.(type) { + case *SchemaField: + if sel.Async { + return true + } + case *TypeAssertion: + if HasAsyncSel(sel.Sels) { + return true + } + case *TypenameField: + // sync + default: + panic("unreachable") + } + } + return false +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go new file mode 100644 index 00000000..37ebacbc --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go @@ -0,0 +1,179 @@ +package exec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/types" +) + +type Response struct { + Data json.RawMessage + Errors []*errors.QueryError +} + +func (r *Request) Subscribe(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) <-chan *Response { + var result reflect.Value + var f *fieldToExec + var err *errors.QueryError + func() { + defer r.handlePanic(ctx) + + sels := 
selected.ApplyOperation(&r.Request, s, op) + var fields []*fieldToExec + collectFieldsToResolve(sels, s, s.Resolver, &fields, make(map[string]*fieldToExec)) + + // TODO: move this check into validation.Validate + if len(fields) != 1 { + err = errors.Errorf("%s", "can subscribe to at most one subscription at a time") + return + } + f = fields[0] + + var in []reflect.Value + if f.field.HasContext { + in = append(in, reflect.ValueOf(ctx)) + } + if f.field.ArgsPacker != nil { + in = append(in, f.field.PackedArgs) + } + callOut := f.resolver.Method(f.field.MethodIndex).Call(in) + result = callOut[0] + + if f.field.HasError && !callOut[1].IsNil() { + switch resolverErr := callOut[1].Interface().(type) { + case *errors.QueryError: + err = resolverErr + case error: + err = errors.Errorf("%s", resolverErr) + err.ResolverError = resolverErr + default: + panic(fmt.Errorf("can only deal with *QueryError and error types, got %T", resolverErr)) + } + } + }() + + // Handles the case where the locally executed func above panicked + if len(r.Request.Errs) > 0 { + return sendAndReturnClosed(&Response{Errors: r.Request.Errs}) + } + + if f == nil { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}}) + } + + if err != nil { + if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}}) + } + return sendAndReturnClosed(&Response{Data: []byte(fmt.Sprintf(`{"%s":null}`, f.field.Alias)), Errors: []*errors.QueryError{err}}) + } + + if ctxErr := ctx.Err(); ctxErr != nil { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{errors.Errorf("%s", ctxErr)}}) + } + + c := make(chan *Response) + // TODO: handle resolver nil channel better? + if result.IsZero() { + close(c) + return c + } + + go func() { + for { + // Check subscription context + chosen, resp, ok := reflect.Select([]reflect.SelectCase{ + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(ctx.Done()), + }, + { + Dir: reflect.SelectRecv, + Chan: result, + }, + }) + switch chosen { + // subscription context done + case 0: + close(c) + return + // upstream received + case 1: + // upstream closed + if !ok { + close(c) + return + } + + subR := &Request{ + Request: selected.Request{ + Doc: r.Request.Doc, + Vars: r.Request.Vars, + Schema: r.Request.Schema, + }, + Limiter: r.Limiter, + Tracer: r.Tracer, + Logger: r.Logger, + } + var out bytes.Buffer + func() { + timeout := r.SubscribeResolverTimeout + if timeout == 0 { + timeout = time.Second + } + + subCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // resolve response + func() { + defer subR.handlePanic(subCtx) + + var buf bytes.Buffer + subR.execSelectionSet(subCtx, f.sels, f.field.Type, &pathSegment{nil, f.field.Alias}, s, resp, &buf) + + propagateChildError := false + if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild && resolvedToNull(&buf) { + propagateChildError = true + } + + if !propagateChildError { + out.WriteString(fmt.Sprintf(`{"%s":`, f.field.Alias)) + out.Write(buf.Bytes()) + out.WriteString(`}`) + } + }() + + if err := subCtx.Err(); err != nil { + c <- &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}} + return + } + + // Send response within timeout + // TODO: maybe block until sent? 
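+ // If the subscription context is cancelled or times out before the
+ // consumer reads from c, this event's response is dropped.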
+ select { + case <-subCtx.Done(): + case c <- &Response{Data: out.Bytes(), Errors: subR.Errs}: + } + }() + } + } + }() + + return c +} + +func sendAndReturnClosed(resp *Response) chan *Response { + c := make(chan *Response, 1) + c <- resp + close(c) + return c +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go new file mode 100644 index 00000000..ca0400cd --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go @@ -0,0 +1,156 @@ +package query + +import ( + "fmt" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/types" +) + +const ( + Query types.OperationType = "QUERY" + Mutation types.OperationType = "MUTATION" + Subscription types.OperationType = "SUBSCRIPTION" +) + +func Parse(queryString string) (*types.ExecutableDefinition, *errors.QueryError) { + l := common.NewLexer(queryString, false) + + var execDef *types.ExecutableDefinition + err := l.CatchSyntaxError(func() { execDef = parseExecutableDefinition(l) }) + if err != nil { + return nil, err + } + + return execDef, nil +} + +func parseExecutableDefinition(l *common.Lexer) *types.ExecutableDefinition { + ed := &types.ExecutableDefinition{} + l.ConsumeWhitespace() + for l.Peek() != scanner.EOF { + if l.Peek() == '{' { + op := &types.OperationDefinition{Type: Query, Loc: l.Location()} + op.Selections = parseSelectionSet(l) + ed.Operations = append(ed.Operations, op) + continue + } + + loc := l.Location() + switch x := l.ConsumeIdent(); x { + case "query": + op := parseOperation(l, Query) + op.Loc = loc + ed.Operations = append(ed.Operations, op) + + case "mutation": + ed.Operations = append(ed.Operations, parseOperation(l, Mutation)) + + case "subscription": + ed.Operations = append(ed.Operations, parseOperation(l, Subscription)) + + case "fragment": + frag := parseFragment(l) + frag.Loc = loc + ed.Fragments = append(ed.Fragments, frag) + + default: + l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "fragment"`, x)) + } + } + return ed +} + +func parseOperation(l *common.Lexer, opType types.OperationType) *types.OperationDefinition { + op := &types.OperationDefinition{Type: opType} + op.Name.Loc = l.Location() + if l.Peek() == scanner.Ident { + op.Name = l.ConsumeIdentWithLoc() + } + op.Directives = common.ParseDirectives(l) + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + loc := l.Location() + l.ConsumeToken('$') + iv := common.ParseInputValue(l) + iv.Loc = loc + op.Vars = append(op.Vars, iv) + } + l.ConsumeToken(')') + } + op.Selections = parseSelectionSet(l) + return op +} + +func parseFragment(l *common.Lexer) *types.FragmentDefinition { + f := &types.FragmentDefinition{} + f.Name = l.ConsumeIdentWithLoc() + l.ConsumeKeyword("on") + f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()} + f.Directives = common.ParseDirectives(l) + f.Selections = parseSelectionSet(l) + return f +} + +func parseSelectionSet(l *common.Lexer) []types.Selection { + var sels []types.Selection + l.ConsumeToken('{') + for l.Peek() != '}' { + sels = append(sels, parseSelection(l)) + } + l.ConsumeToken('}') + return sels +} + +func parseSelection(l *common.Lexer) types.Selection { + if l.Peek() == '.' 
{ + return parseSpread(l) + } + return parseFieldDef(l) +} + +func parseFieldDef(l *common.Lexer) *types.Field { + f := &types.Field{} + f.Alias = l.ConsumeIdentWithLoc() + f.Name = f.Alias + if l.Peek() == ':' { + l.ConsumeToken(':') + f.Name = l.ConsumeIdentWithLoc() + } + if l.Peek() == '(' { + f.Arguments = common.ParseArgumentList(l) + } + f.Directives = common.ParseDirectives(l) + if l.Peek() == '{' { + f.SelectionSetLoc = l.Location() + f.SelectionSet = parseSelectionSet(l) + } + return f +} + +func parseSpread(l *common.Lexer) types.Selection { + loc := l.Location() + l.ConsumeToken('.') + l.ConsumeToken('.') + l.ConsumeToken('.') + + f := &types.InlineFragment{Loc: loc} + if l.Peek() == scanner.Ident { + ident := l.ConsumeIdentWithLoc() + if ident.Name != "on" { + fs := &types.FragmentSpread{ + Name: ident, + Loc: loc, + } + fs.Directives = common.ParseDirectives(l) + return fs + } + f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()} + } + f.Directives = common.ParseDirectives(l) + f.Selections = parseSelectionSet(l) + return f +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go new file mode 100644 index 00000000..9f5bba56 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go @@ -0,0 +1,203 @@ +package schema + +import ( + "github.com/graph-gophers/graphql-go/types" +) + +func init() { + _ = newMeta() +} + +// newMeta initializes an instance of the meta Schema. +func newMeta() *types.Schema { + s := &types.Schema{ + EntryPointNames: make(map[string]string), + Types: make(map[string]types.NamedType), + Directives: make(map[string]*types.DirectiveDefinition), + } + + err := Parse(s, metaSrc, false) + if err != nil { + panic(err) + } + return s +} + +var metaSrc = ` + # The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1. + scalar Int + + # The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point). + scalar Float + + # The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text. + scalar String + + # The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `. + scalar Boolean + + # The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID. + scalar ID + + # Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true. + directive @include( + # Included when true. + if: Boolean! + ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + + # Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true. + directive @skip( + # Skipped when true. + if: Boolean! + ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + + # Marks an element of a GraphQL schema as no longer supported. 
+ directive @deprecated( + # Explains why this element was deprecated, usually also including a suggestion + # for how to access supported similar data. Formatted in + # [Markdown](https://daringfireball.net/projects/markdown/). + reason: String = "No longer supported" + ) on FIELD_DEFINITION | ENUM_VALUE + + # A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document. + # + # In some cases, you need to provide options to alter GraphQL's execution behavior + # in ways field arguments will not suffice, such as conditionally including or + # skipping a field. Directives provide this by describing additional information + # to the executor. + type __Directive { + name: String! + description: String + locations: [__DirectiveLocation!]! + args: [__InputValue!]! + } + + # A Directive can be adjacent to many parts of the GraphQL language, a + # __DirectiveLocation describes one such possible adjacencies. + enum __DirectiveLocation { + # Location adjacent to a query operation. + QUERY + # Location adjacent to a mutation operation. + MUTATION + # Location adjacent to a subscription operation. + SUBSCRIPTION + # Location adjacent to a field. + FIELD + # Location adjacent to a fragment definition. + FRAGMENT_DEFINITION + # Location adjacent to a fragment spread. + FRAGMENT_SPREAD + # Location adjacent to an inline fragment. + INLINE_FRAGMENT + # Location adjacent to a schema definition. + SCHEMA + # Location adjacent to a scalar definition. + SCALAR + # Location adjacent to an object type definition. + OBJECT + # Location adjacent to a field definition. + FIELD_DEFINITION + # Location adjacent to an argument definition. + ARGUMENT_DEFINITION + # Location adjacent to an interface definition. + INTERFACE + # Location adjacent to a union definition. + UNION + # Location adjacent to an enum definition. + ENUM + # Location adjacent to an enum value definition. + ENUM_VALUE + # Location adjacent to an input object type definition. + INPUT_OBJECT + # Location adjacent to an input object field definition. + INPUT_FIELD_DEFINITION + } + + # One possible value for a given Enum. Enum values are unique values, not a + # placeholder for a string or numeric value. However an Enum value is returned in + # a JSON response as a string. + type __EnumValue { + name: String! + description: String + isDeprecated: Boolean! + deprecationReason: String + } + + # Object and Interface types are described by a list of Fields, each of which has + # a name, potentially a list of arguments, and a return type. + type __Field { + name: String! + description: String + args: [__InputValue!]! + type: __Type! + isDeprecated: Boolean! + deprecationReason: String + } + + # Arguments provided to Fields or Directives and the input fields of an + # InputObject are represented as Input Values which describe their type and + # optionally a default value. + type __InputValue { + name: String! + description: String + type: __Type! + # A GraphQL-formatted string representing the default value for this input value. + defaultValue: String + } + + # A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all + # available types and directives on the server, as well as the entry points for + # query, mutation, and subscription operations. + type __Schema { + # A list of all types supported by this server. + types: [__Type!]! + # The type that query operations will be rooted at. + queryType: __Type! 
+ # If this server supports mutation, the type that mutation operations will be rooted at. + mutationType: __Type + # If this server support subscription, the type that subscription operations will be rooted at. + subscriptionType: __Type + # A list of all directives supported by this server. + directives: [__Directive!]! + } + + # The fundamental unit of any GraphQL Schema is the type. There are many kinds of + # types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum. + # + # Depending on the kind of a type, certain fields describe information about that + # type. Scalar types provide no information beyond a name and description, while + # Enum types provide their values. Object and Interface types provide the fields + # they describe. Abstract types, Union and Interface, provide the Object types + # possible at runtime. List and NonNull types compose other types. + type __Type { + kind: __TypeKind! + name: String + description: String + fields(includeDeprecated: Boolean = false): [__Field!] + interfaces: [__Type!] + possibleTypes: [__Type!] + enumValues(includeDeprecated: Boolean = false): [__EnumValue!] + inputFields: [__InputValue!] + ofType: __Type + } + + # An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is. + enum __TypeKind { + # Indicates this type is a scalar. + SCALAR + # Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields. + OBJECT + # Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields. + INTERFACE + # Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field. + UNION + # Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field. + ENUM + # Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field. + INPUT_OBJECT + # Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field. + LIST + # Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field. + NON_NULL + } +` diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go new file mode 100644 index 00000000..fb301c46 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go @@ -0,0 +1,586 @@ +package schema + +import ( + "fmt" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/types" +) + +// New initializes an instance of Schema. 
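+// The returned schema is pre-populated with the introspection meta types and
+// the built-in @include, @skip and @deprecated directives from newMeta.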
+func New() *types.Schema {
+ s := &types.Schema{
+ EntryPointNames: make(map[string]string),
+ Types: make(map[string]types.NamedType),
+ Directives: make(map[string]*types.DirectiveDefinition),
+ }
+ m := newMeta()
+ for n, t := range m.Types {
+ s.Types[n] = t
+ }
+ for n, d := range m.Directives {
+ s.Directives[n] = d
+ }
+ return s
+}
+
+func Parse(s *types.Schema, schemaString string, useStringDescriptions bool) error {
+ l := common.NewLexer(schemaString, useStringDescriptions)
+ err := l.CatchSyntaxError(func() { parseSchema(s, l) })
+ if err != nil {
+ return err
+ }
+
+ if err := mergeExtensions(s); err != nil {
+ return err
+ }
+
+ for _, t := range s.Types {
+ if err := resolveNamedType(s, t); err != nil {
+ return err
+ }
+ }
+ for _, d := range s.Directives {
+ for _, arg := range d.Arguments {
+ t, err := common.ResolveType(arg.Type, s.Resolve)
+ if err != nil {
+ return err
+ }
+ arg.Type = t
+ }
+ }
+
+ // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types
+ // > While any type can be the root operation type for a GraphQL operation, the type system definition language can
+ // > omit the schema definition when the query, mutation, and subscription root types are named Query, Mutation,
+ // > and Subscription respectively.
+ if len(s.EntryPointNames) == 0 {
+ if _, ok := s.Types["Query"]; ok {
+ s.EntryPointNames["query"] = "Query"
+ }
+ if _, ok := s.Types["Mutation"]; ok {
+ s.EntryPointNames["mutation"] = "Mutation"
+ }
+ if _, ok := s.Types["Subscription"]; ok {
+ s.EntryPointNames["subscription"] = "Subscription"
+ }
+ }
+ s.EntryPoints = make(map[string]types.NamedType)
+ for key, name := range s.EntryPointNames {
+ t, ok := s.Types[name]
+ if !ok {
+ return errors.Errorf("type %q not found", name)
+ }
+ s.EntryPoints[key] = t
+ }
+
+ // Interface types need validation: https://spec.graphql.org/draft/#sec-Interfaces.Interfaces-Implementing-Interfaces
+ for _, typeDef := range s.Types {
+ switch t := typeDef.(type) {
+ case *types.InterfaceTypeDefinition:
+ for i, implements := range t.Interfaces {
+ typ, ok := s.Types[implements.Name]
+ if !ok {
+ return errors.Errorf("interface %q not found", implements.Name)
+ }
+ iface, ok := typ.(*types.InterfaceTypeDefinition)
+ if !ok {
+ return errors.Errorf("type %q is not an interface", implements.Name)
+ }
+
+ for _, f := range iface.Fields.Names() {
+ if t.Fields.Get(f) == nil {
+ return errors.Errorf("interface %q expects field %q but %q does not provide it", iface.Name, f, t.Name)
+ }
+ }
+
+ t.Interfaces[i] = iface
+ }
+ default:
+ continue
+ }
+ }
+
+ for _, obj := range s.Objects {
+ obj.Interfaces = make([]*types.InterfaceTypeDefinition, len(obj.InterfaceNames))
+ if err := resolveDirectives(s, obj.Directives, "OBJECT"); err != nil {
+ return err
+ }
+ for _, field := range obj.Fields {
+ if err := resolveDirectives(s, field.Directives, "FIELD_DEFINITION"); err != nil {
+ return err
+ }
+ }
+ for i, intfName := range obj.InterfaceNames {
+ t, ok := s.Types[intfName]
+ if !ok {
+ return errors.Errorf("interface %q not found", intfName)
+ }
+ intf, ok := t.(*types.InterfaceTypeDefinition)
+ if !ok {
+ return errors.Errorf("type %q is not an interface", intfName)
+ }
+ for _, f := range intf.Fields.Names() {
+ if obj.Fields.Get(f) == nil {
+ return errors.Errorf("interface %q expects field %q but %q does not provide it", intfName, f, obj.Name)
+ }
+ }
+ obj.Interfaces[i] = intf
+ intf.PossibleTypes = append(intf.PossibleTypes, obj)
+ }
+ }
+
+ for _, union := range s.Unions {
+ if err := 
resolveDirectives(s, union.Directives, "UNION"); err != nil { + return err + } + union.UnionMemberTypes = make([]*types.ObjectTypeDefinition, len(union.TypeNames)) + for i, name := range union.TypeNames { + t, ok := s.Types[name] + if !ok { + return errors.Errorf("object type %q not found", name) + } + obj, ok := t.(*types.ObjectTypeDefinition) + if !ok { + return errors.Errorf("type %q is not an object", name) + } + union.UnionMemberTypes[i] = obj + } + } + + for _, enum := range s.Enums { + if err := resolveDirectives(s, enum.Directives, "ENUM"); err != nil { + return err + } + for _, value := range enum.EnumValuesDefinition { + if err := resolveDirectives(s, value.Directives, "ENUM_VALUE"); err != nil { + return err + } + } + } + + return nil +} + +func ParseSchema(schemaString string, useStringDescriptions bool) (*types.Schema, error) { + s := New() + err := Parse(s, schemaString, useStringDescriptions) + return s, err +} + +func mergeExtensions(s *types.Schema) error { + for _, ext := range s.Extensions { + typ := s.Types[ext.Type.TypeName()] + if typ == nil { + return fmt.Errorf("trying to extend unknown type %q", ext.Type.TypeName()) + } + + if typ.Kind() != ext.Type.Kind() { + return fmt.Errorf("trying to extend type %q with type %q", typ.Kind(), ext.Type.Kind()) + } + + switch og := typ.(type) { + case *types.ObjectTypeDefinition: + e := ext.Type.(*types.ObjectTypeDefinition) + + for _, field := range e.Fields { + if og.Fields.Get(field.Name) != nil { + return fmt.Errorf("extended field %q already exists", field.Name) + } + } + og.Fields = append(og.Fields, e.Fields...) + + for _, en := range e.InterfaceNames { + for _, on := range og.InterfaceNames { + if on == en { + return fmt.Errorf("interface %q implemented in the extension is already implemented in %q", on, og.Name) + } + } + } + og.InterfaceNames = append(og.InterfaceNames, e.InterfaceNames...) + + case *types.InputObject: + e := ext.Type.(*types.InputObject) + + for _, field := range e.Values { + if og.Values.Get(field.Name.Name) != nil { + return fmt.Errorf("extended field %q already exists", field.Name) + } + } + og.Values = append(og.Values, e.Values...) + + case *types.InterfaceTypeDefinition: + e := ext.Type.(*types.InterfaceTypeDefinition) + + for _, field := range e.Fields { + if og.Fields.Get(field.Name) != nil { + return fmt.Errorf("extended field %s already exists", field.Name) + } + } + og.Fields = append(og.Fields, e.Fields...) + + case *types.Union: + e := ext.Type.(*types.Union) + + for _, en := range e.TypeNames { + for _, on := range og.TypeNames { + if on == en { + return fmt.Errorf("union type %q already declared in %q", on, og.Name) + } + } + } + og.TypeNames = append(og.TypeNames, e.TypeNames...) + + case *types.EnumTypeDefinition: + e := ext.Type.(*types.EnumTypeDefinition) + + for _, en := range e.EnumValuesDefinition { + for _, on := range og.EnumValuesDefinition { + if on.EnumValue == en.EnumValue { + return fmt.Errorf("enum value %q already declared in %q", on.EnumValue, og.Name) + } + } + } + og.EnumValuesDefinition = append(og.EnumValuesDefinition, e.EnumValuesDefinition...) 
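+ // Extensions of any other kind (for example scalar) are not supported
+ // and are rejected by the default case below.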
+ default: + return fmt.Errorf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, og.TypeName()) + } + } + + return nil +} + +func resolveNamedType(s *types.Schema, t types.NamedType) error { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + for _, f := range t.Fields { + if err := resolveField(s, f); err != nil { + return err + } + } + case *types.InterfaceTypeDefinition: + for _, f := range t.Fields { + if err := resolveField(s, f); err != nil { + return err + } + } + case *types.InputObject: + if err := resolveInputObject(s, t.Values); err != nil { + return err + } + } + return nil +} + +func resolveField(s *types.Schema, f *types.FieldDefinition) error { + t, err := common.ResolveType(f.Type, s.Resolve) + if err != nil { + return err + } + f.Type = t + if err := resolveDirectives(s, f.Directives, "FIELD_DEFINITION"); err != nil { + return err + } + return resolveInputObject(s, f.Arguments) +} + +func resolveDirectives(s *types.Schema, directives types.DirectiveList, loc string) error { + for _, d := range directives { + dirName := d.Name.Name + dd, ok := s.Directives[dirName] + if !ok { + return errors.Errorf("directive %q not found", dirName) + } + validLoc := false + for _, l := range dd.Locations { + if l == loc { + validLoc = true + break + } + } + if !validLoc { + return errors.Errorf("invalid location %q for directive %q (must be one of %v)", loc, dirName, dd.Locations) + } + for _, arg := range d.Arguments { + if dd.Arguments.Get(arg.Name.Name) == nil { + return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName) + } + } + for _, arg := range dd.Arguments { + if _, ok := d.Arguments.Get(arg.Name.Name); !ok { + d.Arguments = append(d.Arguments, &types.Argument{Name: arg.Name, Value: arg.Default}) + } + } + } + return nil +} + +func resolveInputObject(s *types.Schema, values types.ArgumentsDefinition) error { + for _, v := range values { + t, err := common.ResolveType(v.Type, s.Resolve) + if err != nil { + return err + } + v.Type = t + } + return nil +} + +func parseSchema(s *types.Schema, l *common.Lexer) { + l.ConsumeWhitespace() + + for l.Peek() != scanner.EOF { + desc := l.DescComment() + switch x := l.ConsumeIdent(); x { + + case "schema": + l.ConsumeToken('{') + for l.Peek() != '}' { + + name := l.ConsumeIdent() + l.ConsumeToken(':') + typ := l.ConsumeIdent() + s.EntryPointNames[name] = typ + } + l.ConsumeToken('}') + + case "type": + obj := parseObjectDef(l) + obj.Desc = desc + s.Types[obj.Name] = obj + s.Objects = append(s.Objects, obj) + + case "interface": + iface := parseInterfaceDef(l) + iface.Desc = desc + s.Types[iface.Name] = iface + + case "union": + union := parseUnionDef(l) + union.Desc = desc + s.Types[union.Name] = union + s.Unions = append(s.Unions, union) + + case "enum": + enum := parseEnumDef(l) + enum.Desc = desc + s.Types[enum.Name] = enum + s.Enums = append(s.Enums, enum) + + case "input": + input := parseInputDef(l) + input.Desc = desc + s.Types[input.Name] = input + + case "scalar": + loc := l.Location() + name := l.ConsumeIdent() + directives := common.ParseDirectives(l) + s.Types[name] = &types.ScalarTypeDefinition{Name: name, Desc: desc, Directives: directives, Loc: loc} + + case "directive": + directive := parseDirectiveDef(l) + directive.Desc = desc + s.Directives[directive.Name] = directive + + case "extend": + parseExtension(s, l) + + default: + // TODO: Add support for type extensions. 
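+ // (Type extensions are parsed by the "extend" case above; any other
+ // keyword is a syntax error.)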
+ l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x)) + } + } +} + +func parseObjectDef(l *common.Lexer) *types.ObjectTypeDefinition { + object := &types.ObjectTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + for { + if l.Peek() == '{' { + break + } + + if l.Peek() == '@' { + object.Directives = common.ParseDirectives(l) + continue + } + + if l.Peek() == scanner.Ident { + l.ConsumeKeyword("implements") + + for l.Peek() != '{' && l.Peek() != '@' { + if l.Peek() == '&' { + l.ConsumeToken('&') + } + + object.InterfaceNames = append(object.InterfaceNames, l.ConsumeIdent()) + } + continue + } + + } + l.ConsumeToken('{') + object.Fields = parseFieldsDef(l) + l.ConsumeToken('}') + + return object + +} + +func parseInterfaceDef(l *common.Lexer) *types.InterfaceTypeDefinition { + i := &types.InterfaceTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + if l.Peek() == scanner.Ident { + l.ConsumeKeyword("implements") + i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()}) + + for l.Peek() == '&' { + l.ConsumeToken('&') + i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()}) + } + } + + i.Directives = common.ParseDirectives(l) + + l.ConsumeToken('{') + i.Fields = parseFieldsDef(l) + l.ConsumeToken('}') + + return i +} + +func parseUnionDef(l *common.Lexer) *types.Union { + union := &types.Union{Loc: l.Location(), Name: l.ConsumeIdent()} + + union.Directives = common.ParseDirectives(l) + l.ConsumeToken('=') + union.TypeNames = []string{l.ConsumeIdent()} + for l.Peek() == '|' { + l.ConsumeToken('|') + union.TypeNames = append(union.TypeNames, l.ConsumeIdent()) + } + + return union +} + +func parseInputDef(l *common.Lexer) *types.InputObject { + i := &types.InputObject{} + i.Loc = l.Location() + i.Name = l.ConsumeIdent() + i.Directives = common.ParseDirectives(l) + l.ConsumeToken('{') + for l.Peek() != '}' { + i.Values = append(i.Values, common.ParseInputValue(l)) + } + l.ConsumeToken('}') + return i +} + +func parseEnumDef(l *common.Lexer) *types.EnumTypeDefinition { + enum := &types.EnumTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + enum.Directives = common.ParseDirectives(l) + l.ConsumeToken('{') + for l.Peek() != '}' { + v := &types.EnumValueDefinition{ + Desc: l.DescComment(), + Loc: l.Location(), + EnumValue: l.ConsumeIdent(), + Directives: common.ParseDirectives(l), + } + + enum.EnumValuesDefinition = append(enum.EnumValuesDefinition, v) + } + l.ConsumeToken('}') + return enum +} +func parseDirectiveDef(l *common.Lexer) *types.DirectiveDefinition { + l.ConsumeToken('@') + loc := l.Location() + d := &types.DirectiveDefinition{Name: l.ConsumeIdent(), Loc: loc} + + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + v := common.ParseInputValue(l) + d.Arguments = append(d.Arguments, v) + } + l.ConsumeToken(')') + } + + l.ConsumeKeyword("on") + + for { + loc := l.ConsumeIdent() + d.Locations = append(d.Locations, loc) + if l.Peek() != '|' { + break + } + l.ConsumeToken('|') + } + return d +} + +func parseExtension(s *types.Schema, l *common.Lexer) { + loc := l.Location() + switch x := l.ConsumeIdent(); x { + case "schema": + l.ConsumeToken('{') + for l.Peek() != '}' { + name := l.ConsumeIdent() + l.ConsumeToken(':') + typ := l.ConsumeIdent() + s.EntryPointNames[name] = typ + } + l.ConsumeToken('}') + + case "type": + obj := parseObjectDef(l) + s.Extensions = append(s.Extensions, 
&types.Extension{Type: obj, Loc: loc}) + + case "interface": + iface := parseInterfaceDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: iface, Loc: loc}) + + case "union": + union := parseUnionDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: union, Loc: loc}) + + case "enum": + enum := parseEnumDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: enum, Loc: loc}) + + case "input": + input := parseInputDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: input, Loc: loc}) + + default: + // TODO: Add ScalarTypeDefinition when adding directives + l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, x)) + } +} + +func parseFieldsDef(l *common.Lexer) types.FieldsDefinition { + var fields types.FieldsDefinition + for l.Peek() != '}' { + f := &types.FieldDefinition{} + f.Desc = l.DescComment() + f.Loc = l.Location() + f.Name = l.ConsumeIdent() + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + f.Arguments = append(f.Arguments, common.ParseInputValue(l)) + } + l.ConsumeToken(')') + } + l.ConsumeToken(':') + f.Type = common.ParseType(l) + f.Directives = common.ParseDirectives(l) + fields = append(fields, f) + } + return fields +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go new file mode 100644 index 00000000..9702b5f5 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go @@ -0,0 +1,71 @@ +package validation + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +func makeSuggestion(prefix string, options []string, input string) string { + var selected []string + distances := make(map[string]int) + for _, opt := range options { + distance := levenshteinDistance(input, opt) + threshold := max(len(input)/2, max(len(opt)/2, 1)) + if distance < threshold { + selected = append(selected, opt) + distances[opt] = distance + } + } + + if len(selected) == 0 { + return "" + } + sort.Slice(selected, func(i, j int) bool { + return distances[selected[i]] < distances[selected[j]] + }) + + parts := make([]string, len(selected)) + for i, opt := range selected { + parts[i] = strconv.Quote(opt) + } + if len(parts) > 1 { + parts[len(parts)-1] = "or " + parts[len(parts)-1] + } + return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", ")) +} + +func levenshteinDistance(s1, s2 string) int { + column := make([]int, len(s1)+1) + for y := range s1 { + column[y+1] = y + 1 + } + for x, rx := range s2 { + column[0] = x + 1 + lastdiag := x + for y, ry := range s1 { + olddiag := column[y+1] + if rx != ry { + lastdiag++ + } + column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag)) + lastdiag = olddiag + } + } + return column[len(s1)] +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go new file mode 100644 index 00000000..e3672638 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go @@ -0,0 +1,980 @@ +package validation + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + 
"github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/types" +) + +type varSet map[*types.InputValueDefinition]struct{} + +type selectionPair struct{ a, b types.Selection } + +type nameSet map[string]errors.Location + +type fieldInfo struct { + sf *types.FieldDefinition + parent types.NamedType +} + +type context struct { + schema *types.Schema + doc *types.ExecutableDefinition + errs []*errors.QueryError + opErrs map[*types.OperationDefinition][]*errors.QueryError + usedVars map[*types.OperationDefinition]varSet + fieldMap map[*types.Field]fieldInfo + overlapValidated map[selectionPair]struct{} + maxDepth int +} + +func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) { + c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...) +} + +func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) { + c.errs = append(c.errs, &errors.QueryError{ + Message: fmt.Sprintf(format, a...), + Locations: locs, + Rule: rule, + }) +} + +type opContext struct { + *context + ops []*types.OperationDefinition +} + +func newContext(s *types.Schema, doc *types.ExecutableDefinition, maxDepth int) *context { + return &context{ + schema: s, + doc: doc, + opErrs: make(map[*types.OperationDefinition][]*errors.QueryError), + usedVars: make(map[*types.OperationDefinition]varSet), + fieldMap: make(map[*types.Field]fieldInfo), + overlapValidated: make(map[selectionPair]struct{}), + maxDepth: maxDepth, + } +} + +func Validate(s *types.Schema, doc *types.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError { + c := newContext(s, doc, maxDepth) + + opNames := make(nameSet) + fragUsedBy := make(map[*types.FragmentDefinition][]*types.OperationDefinition) + for _, op := range doc.Operations { + c.usedVars[op] = make(varSet) + opc := &opContext{c, []*types.OperationDefinition{op}} + + // Check if max depth is exceeded, if it's set. If max depth is exceeded, + // don't continue to validate the document and exit early. + if validateMaxDepth(opc, op.Selections, nil, 1) { + return c.errs + } + + if op.Name.Name == "" && len(doc.Operations) != 1 { + c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.") + } + if op.Name.Name != "" { + validateName(c, opNames, op.Name, "UniqueOperationNames", "operation") + } + + validateDirectives(opc, string(op.Type), op.Directives) + + varNames := make(nameSet) + for _, v := range op.Vars { + validateName(c, varNames, v.Name, "UniqueVariableNames", "variable") + + t := resolveType(c, v.Type) + if !canBeInput(t) { + c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t) + } + validateValue(opc, v, variables[v.Name.Name], t) + + if v.Default != nil { + validateLiteral(opc, v.Default) + + if t != nil { + if nn, ok := t.(*types.NonNull); ok { + c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. 
Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType) + } + + if ok, reason := validateValueType(opc, v.Default, t); !ok { + c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason) + } + } + } + } + + var entryPoint types.NamedType + switch op.Type { + case query.Query: + entryPoint = s.EntryPoints["query"] + case query.Mutation: + entryPoint = s.EntryPoints["mutation"] + case query.Subscription: + entryPoint = s.EntryPoints["subscription"] + default: + panic("unreachable") + } + + validateSelectionSet(opc, op.Selections, entryPoint) + + fragUsed := make(map[*types.FragmentDefinition]struct{}) + markUsedFragments(c, op.Selections, fragUsed) + for frag := range fragUsed { + fragUsedBy[frag] = append(fragUsedBy[frag], op) + } + } + + fragNames := make(nameSet) + fragVisited := make(map[*types.FragmentDefinition]struct{}) + for _, frag := range doc.Fragments { + opc := &opContext{c, fragUsedBy[frag]} + + validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment") + validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives) + + t := unwrapType(resolveType(c, &frag.On)) + // continue even if t is nil + if t != nil && !canBeFragment(t) { + c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t) + continue + } + + validateSelectionSet(opc, frag.Selections, t) + + if _, ok := fragVisited[frag]; !ok { + detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0}) + } + } + + for _, frag := range doc.Fragments { + if len(fragUsedBy[frag]) == 0 { + c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name) + } + } + + for _, op := range doc.Operations { + c.errs = append(c.errs, c.opErrs[op]...) 
+ + opUsedVars := c.usedVars[op] + for _, v := range op.Vars { + if _, ok := opUsedVars[v]; !ok { + opSuffix := "" + if op.Name.Name != "" { + opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name) + } + c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix) + } + } + } + + return c.errs +} + +func validateValue(c *opContext, v *types.InputValueDefinition, val interface{}, t types.Type) { + switch t := t.(type) { + case *types.NonNull: + if val == nil { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value null.\nExpected type \"%s\", found null.", v.Name.Name, t) + return + } + validateValue(c, v, val, t.OfType) + case *types.List: + if val == nil { + return + } + vv, ok := val.([]interface{}) + if !ok { + // Input coercion rules allow single items without wrapping array + validateValue(c, v, val, t.OfType) + return + } + for _, elem := range vv { + validateValue(c, v, elem, t.OfType) + } + case *types.EnumTypeDefinition: + if val == nil { + return + } + e, ok := val.(string) + if !ok { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %v.", v.Name.Name, val, t, val) + return + } + for _, option := range t.EnumValuesDefinition { + if option.EnumValue == e { + return + } + } + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value %s.\nExpected type \"%s\", found %s.", v.Name.Name, e, t, e) + case *types.InputObject: + if val == nil { + return + } + in, ok := val.(map[string]interface{}) + if !ok { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %s.", v.Name.Name, val, t, val) + return + } + for _, f := range t.Values { + fieldVal := in[f.Name.Name] + validateValue(c, f, fieldVal, f.Type) + } + } +} + +// validates the query doesn't go deeper than maxDepth (if set). Returns whether +// or not query validated max depth to avoid excessive recursion. +// +// The visited map is necessary to ensure that max depth validation does not get stuck in cyclical +// fragment spreads. +func validateMaxDepth(c *opContext, sels []types.Selection, visited map[*types.FragmentDefinition]struct{}, depth int) bool { + // maxDepth checking is turned off when maxDepth is 0 + if c.maxDepth == 0 { + return false + } + + exceededMaxDepth := false + if visited == nil { + visited = map[*types.FragmentDefinition]struct{}{} + } + + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + if depth > c.maxDepth { + exceededMaxDepth = true + c.addErr(sel.Alias.Loc, "MaxDepthExceeded", "Field %q has depth %d that exceeds max depth %d", sel.Name.Name, depth, c.maxDepth) + continue + } + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.SelectionSet, visited, depth+1) + + case *types.InlineFragment: + // Depth is not checked because inline fragments resolve to other fields which are checked. + // Depth is not incremented because inline fragments have the same depth as neighboring fields + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, visited, depth) + case *types.FragmentSpread: + // Depth is not checked because fragments resolve to other fields which are checked. + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + // In case of unknown fragment (invalid request), ignore max depth evaluation + c.addErr(sel.Loc, "MaxDepthEvaluationError", "Unknown fragment %q. 
Unable to evaluate depth.", sel.Name.Name) + continue + } + + if _, ok := visited[frag]; ok { + // we've already seen this fragment, don't check depth again. + continue + } + visited[frag] = struct{}{} + + // Depth is not incremented because fragments have the same depth as surrounding fields + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, frag.Selections, visited, depth) + } + } + + return exceededMaxDepth +} + +func validateSelectionSet(c *opContext, sels []types.Selection, t types.NamedType) { + for _, sel := range sels { + validateSelection(c, sel, t) + } + + for i, a := range sels { + for _, b := range sels[i+1:] { + c.validateOverlap(a, b, nil, nil) + } + } +} + +func validateSelection(c *opContext, sel types.Selection, t types.NamedType) { + switch sel := sel.(type) { + case *types.Field: + validateDirectives(c, "FIELD", sel.Directives) + + fieldName := sel.Name.Name + var f *types.FieldDefinition + switch fieldName { + case "__typename": + f = &types.FieldDefinition{ + Name: "__typename", + Type: c.schema.Types["String"], + } + case "__schema": + f = &types.FieldDefinition{ + Name: "__schema", + Type: c.schema.Types["__Schema"], + } + case "__type": + f = &types.FieldDefinition{ + Name: "__type", + Arguments: types.ArgumentsDefinition{ + &types.InputValueDefinition{ + Name: types.Ident{Name: "name"}, + Type: &types.NonNull{OfType: c.schema.Types["String"]}, + }, + }, + Type: c.schema.Types["__Type"], + } + default: + f = fields(t).Get(fieldName) + if f == nil && t != nil { + suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName) + c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion) + } + } + c.fieldMap[sel] = fieldInfo{sf: f, parent: t} + + validateArgumentLiterals(c, sel.Arguments) + if f != nil { + validateArgumentTypes(c, sel.Arguments, f.Arguments, sel.Alias.Loc, + func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) }, + func() string { return fmt.Sprintf("Field %q", fieldName) }, + ) + } + + var ft types.Type + if f != nil { + ft = f.Type + sf := hasSubfields(ft) + if sf && sel.SelectionSet == nil { + c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... 
}\"?", fieldName, ft, fieldName) + } + if !sf && sel.SelectionSet != nil { + c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft) + } + } + if sel.SelectionSet != nil { + validateSelectionSet(c, sel.SelectionSet, unwrapType(ft)) + } + + case *types.InlineFragment: + validateDirectives(c, "INLINE_FRAGMENT", sel.Directives) + if sel.On.Name != "" { + fragTyp := unwrapType(resolveType(c.context, &sel.On)) + if fragTyp != nil && !compatible(t, fragTyp) { + c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp) + } + t = fragTyp + // continue even if t is nil + } + if t != nil && !canBeFragment(t) { + c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t) + return + } + validateSelectionSet(c, sel.Selections, unwrapType(t)) + + case *types.FragmentSpread: + validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives) + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name) + return + } + fragTyp := c.schema.Types[frag.On.Name] + if !compatible(t, fragTyp) { + c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp) + } + + default: + panic("unreachable") + } +} + +func compatible(a, b types.Type) bool { + for _, pta := range possibleTypes(a) { + for _, ptb := range possibleTypes(b) { + if pta == ptb { + return true + } + } + } + return false +} + +func possibleTypes(t types.Type) []*types.ObjectTypeDefinition { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return []*types.ObjectTypeDefinition{t} + case *types.InterfaceTypeDefinition: + return t.PossibleTypes + case *types.Union: + return t.UnionMemberTypes + default: + return nil + } +} + +func markUsedFragments(c *context, sels []types.Selection, fragUsed map[*types.FragmentDefinition]struct{}) { + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + if sel.SelectionSet != nil { + markUsedFragments(c, sel.SelectionSet, fragUsed) + } + + case *types.InlineFragment: + markUsedFragments(c, sel.Selections, fragUsed) + + case *types.FragmentSpread: + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + return + } + + if _, ok := fragUsed[frag]; ok { + continue + } + + fragUsed[frag] = struct{}{} + markUsedFragments(c, frag.Selections, fragUsed) + + default: + panic("unreachable") + } + } +} + +func detectFragmentCycle(c *context, sels []types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) { + for _, sel := range sels { + detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex) + } +} + +func detectFragmentCycleSel(c *context, sel types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) { + switch sel := sel.(type) { + case *types.Field: + if sel.SelectionSet != nil { + detectFragmentCycle(c, sel.SelectionSet, fragVisited, spreadPath, spreadPathIndex) + } + + case *types.InlineFragment: + detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex) + + case *types.FragmentSpread: + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + return + } + + spreadPath = append(spreadPath, sel) + if i, ok 
:= spreadPathIndex[frag.Name.Name]; ok { + cyclePath := spreadPath[i:] + via := "" + if len(cyclePath) > 1 { + names := make([]string, len(cyclePath)-1) + for i, frag := range cyclePath[:len(cyclePath)-1] { + names[i] = frag.Name.Name + } + via = " via " + strings.Join(names, ", ") + } + + locs := make([]errors.Location, len(cyclePath)) + for i, frag := range cyclePath { + locs[i] = frag.Loc + } + c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via) + return + } + + if _, ok := fragVisited[frag]; ok { + return + } + fragVisited[frag] = struct{}{} + + spreadPathIndex[frag.Name.Name] = len(spreadPath) + detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex) + delete(spreadPathIndex, frag.Name.Name) + + default: + panic("unreachable") + } +} + +func (c *context) validateOverlap(a, b types.Selection, reasons *[]string, locs *[]errors.Location) { + if a == b { + return + } + + if _, ok := c.overlapValidated[selectionPair{a, b}]; ok { + return + } + c.overlapValidated[selectionPair{a, b}] = struct{}{} + c.overlapValidated[selectionPair{b, a}] = struct{}{} + + switch a := a.(type) { + case *types.Field: + switch b := b.(type) { + case *types.Field: + if b.Alias.Loc.Before(a.Alias.Loc) { + a, b = b, a + } + if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 { + locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc) + if reasons == nil { + c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and ")) + return + } + for _, r := range reasons2 { + *reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r)) + } + *locs = append(*locs, locs2...) 
+ } + + case *types.InlineFragment: + for _, sel := range b.Selections { + c.validateOverlap(a, sel, reasons, locs) + } + + case *types.FragmentSpread: + if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil { + for _, sel := range frag.Selections { + c.validateOverlap(a, sel, reasons, locs) + } + } + + default: + panic("unreachable") + } + + case *types.InlineFragment: + for _, sel := range a.Selections { + c.validateOverlap(sel, b, reasons, locs) + } + + case *types.FragmentSpread: + if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil { + for _, sel := range frag.Selections { + c.validateOverlap(sel, b, reasons, locs) + } + } + + default: + panic("unreachable") + } +} + +func (c *context) validateFieldOverlap(a, b *types.Field) ([]string, []errors.Location) { + if a.Alias.Name != b.Alias.Name { + return nil, nil + } + + if asf := c.fieldMap[a].sf; asf != nil { + if bsf := c.fieldMap[b].sf; bsf != nil { + if !typesCompatible(asf.Type, bsf.Type) { + return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil + } + } + } + + at := c.fieldMap[a].parent + bt := c.fieldMap[b].parent + if at == nil || bt == nil || at == bt { + if a.Name.Name != b.Name.Name { + return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil + } + + if argumentsConflict(a.Arguments, b.Arguments) { + return []string{"they have differing arguments"}, nil + } + } + + var reasons []string + var locs []errors.Location + for _, a2 := range a.SelectionSet { + for _, b2 := range b.SelectionSet { + c.validateOverlap(a2, b2, &reasons, &locs) + } + } + return reasons, locs +} + +func argumentsConflict(a, b types.ArgumentList) bool { + if len(a) != len(b) { + return true + } + for _, argA := range a { + valB, ok := b.Get(argA.Name.Name) + if !ok || !reflect.DeepEqual(argA.Value.Deserialize(nil), valB.Deserialize(nil)) { + return true + } + } + return false +} + +func fields(t types.Type) types.FieldsDefinition { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return t.Fields + case *types.InterfaceTypeDefinition: + return t.Fields + default: + return nil + } +} + +func unwrapType(t types.Type) types.NamedType { + if t == nil { + return nil + } + for { + switch t2 := t.(type) { + case types.NamedType: + return t2 + case *types.List: + t = t2.OfType + case *types.NonNull: + t = t2.OfType + default: + panic("unreachable") + } + } +} + +func resolveType(c *context, t types.Type) types.Type { + t2, err := common.ResolveType(t, c.schema.Resolve) + if err != nil { + c.errs = append(c.errs, err) + } + return t2 +} + +func validateDirectives(c *opContext, loc string, directives types.DirectiveList) { + directiveNames := make(nameSet) + for _, d := range directives { + dirName := d.Name.Name + validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string { + return fmt.Sprintf("The directive %q can only be used once at this location.", dirName) + }) + + validateArgumentLiterals(c, d.Arguments) + + dd, ok := c.schema.Directives[dirName] + if !ok { + c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName) + continue + } + + locOK := false + for _, allowedLoc := range dd.Locations { + if loc == allowedLoc { + locOK = true + break + } + } + if !locOK { + c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc) + } + + validateArgumentTypes(c, d.Arguments, dd.Arguments, d.Name.Loc, + func() string { return fmt.Sprintf("directive %q", "@"+dirName) }, + func() 
string { return fmt.Sprintf("Directive %q", "@"+dirName) }, + ) + } +} + +func validateName(c *context, set nameSet, name types.Ident, rule string, kind string) { + validateNameCustomMsg(c, set, name, rule, func() string { + return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name) + }) +} + +func validateNameCustomMsg(c *context, set nameSet, name types.Ident, rule string, msg func() string) { + if loc, ok := set[name.Name]; ok { + c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg()) + return + } + set[name.Name] = name.Loc +} + +func validateArgumentTypes(c *opContext, args types.ArgumentList, argDecls types.ArgumentsDefinition, loc errors.Location, owner1, owner2 func() string) { + for _, selArg := range args { + arg := argDecls.Get(selArg.Name.Name) + if arg == nil { + c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1()) + continue + } + value := selArg.Value + if ok, reason := validateValueType(c, value, arg.Type); !ok { + c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason) + } + } + for _, decl := range argDecls { + if _, ok := decl.Type.(*types.NonNull); ok { + if _, ok := args.Get(decl.Name.Name); !ok { + c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type) + } + } + } +} + +func validateArgumentLiterals(c *opContext, args types.ArgumentList) { + argNames := make(nameSet) + for _, arg := range args { + validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument") + validateLiteral(c, arg.Value) + } +} + +func validateLiteral(c *opContext, l types.Value) { + switch l := l.(type) { + case *types.ObjectValue: + fieldNames := make(nameSet) + for _, f := range l.Fields { + validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field") + validateLiteral(c, f.Value) + } + case *types.ListValue: + for _, entry := range l.Values { + validateLiteral(c, entry) + } + case *types.Variable: + for _, op := range c.ops { + v := op.Vars.Get(l.Name) + if v == nil { + byOp := "" + if op.Name.Name != "" { + byOp = fmt.Sprintf(" by operation %q", op.Name.Name) + } + c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{ + Message: fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp), + Locations: []errors.Location{l.Loc, op.Loc}, + Rule: "NoUndefinedVariables", + }) + continue + } + validateValueType(c, l, resolveType(c.context, v.Type)) + c.usedVars[op][v] = struct{}{} + } + } +} + +func validateValueType(c *opContext, v types.Value, t types.Type) (bool, string) { + if v, ok := v.(*types.Variable); ok { + for _, op := range c.ops { + if v2 := op.Vars.Get(v.Name); v2 != nil { + t2, err := common.ResolveType(v2.Type, c.schema.Resolve) + if _, ok := t2.(*types.NonNull); !ok && v2.Default != nil { + t2 = &types.NonNull{OfType: t2} + } + if err == nil && !typeCanBeUsedAs(t2, t) { + c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t) + } + } + } + return true, "" + } + + if nn, ok := t.(*types.NonNull); ok { + if isNull(v) { + return false, fmt.Sprintf("Expected %q, found null.", t) + } + t = nn.OfType + } + if isNull(v) { + return true, "" + } + + switch t := t.(type) { + case *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + if lit, ok := v.(*types.PrimitiveValue); ok { + if validateBasicLit(lit, t) { + 
return true, "" + } + return false, fmt.Sprintf("Expected type %q, found %s.", t, v) + } + return true, "" + + case *types.List: + list, ok := v.(*types.ListValue) + if !ok { + return validateValueType(c, v, t.OfType) // single value instead of list + } + for i, entry := range list.Values { + if ok, reason := validateValueType(c, entry, t.OfType); !ok { + return false, fmt.Sprintf("In element #%d: %s", i, reason) + } + } + return true, "" + + case *types.InputObject: + v, ok := v.(*types.ObjectValue) + if !ok { + return false, fmt.Sprintf("Expected %q, found not an object.", t) + } + for _, f := range v.Fields { + name := f.Name.Name + iv := t.Values.Get(name) + if iv == nil { + return false, fmt.Sprintf("In field %q: Unknown field.", name) + } + if ok, reason := validateValueType(c, f.Value, iv.Type); !ok { + return false, fmt.Sprintf("In field %q: %s", name, reason) + } + } + for _, iv := range t.Values { + found := false + for _, f := range v.Fields { + if f.Name.Name == iv.Name.Name { + found = true + break + } + } + if !found { + if _, ok := iv.Type.(*types.NonNull); ok && iv.Default == nil { + return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type) + } + } + } + return true, "" + } + + return false, fmt.Sprintf("Expected type %q, found %s.", t, v) +} + +func validateBasicLit(v *types.PrimitiveValue, t types.Type) bool { + switch t := t.(type) { + case *types.ScalarTypeDefinition: + switch t.Name { + case "Int": + if v.Type != scanner.Int { + return false + } + f, err := strconv.ParseFloat(v.Text, 64) + if err != nil { + panic(err) + } + return f >= math.MinInt32 && f <= math.MaxInt32 + case "Float": + return v.Type == scanner.Int || v.Type == scanner.Float + case "String": + return v.Type == scanner.String + case "Boolean": + return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false") + case "ID": + return v.Type == scanner.Int || v.Type == scanner.String + default: + //TODO: Type-check against expected type by Unmarshalling + return true + } + + case *types.EnumTypeDefinition: + if v.Type != scanner.Ident { + return false + } + for _, option := range t.EnumValuesDefinition { + if option.EnumValue == v.Text { + return true + } + } + return false + } + + return false +} + +func canBeFragment(t types.Type) bool { + switch t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return true + default: + return false + } +} + +func canBeInput(t types.Type) bool { + switch t := t.(type) { + case *types.InputObject, *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + return true + case *types.List: + return canBeInput(t.OfType) + case *types.NonNull: + return canBeInput(t.OfType) + default: + return false + } +} + +func hasSubfields(t types.Type) bool { + switch t := t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return true + case *types.List: + return hasSubfields(t.OfType) + case *types.NonNull: + return hasSubfields(t.OfType) + default: + return false + } +} + +func isLeaf(t types.Type) bool { + switch t.(type) { + case *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + return true + default: + return false + } +} + +func isNull(lit interface{}) bool { + _, ok := lit.(*types.NullValue) + return ok +} + +func typesCompatible(a, b types.Type) bool { + al, aIsList := a.(*types.List) + bl, bIsList := b.(*types.List) + if aIsList || bIsList { + return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType) + } + + ann, aIsNN := 
a.(*types.NonNull) + bnn, bIsNN := b.(*types.NonNull) + if aIsNN || bIsNN { + return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType) + } + + if isLeaf(a) || isLeaf(b) { + return a == b + } + + return true +} + +func typeCanBeUsedAs(t, as types.Type) bool { + nnT, okT := t.(*types.NonNull) + if okT { + t = nnT.OfType + } + + nnAs, okAs := as.(*types.NonNull) + if okAs { + as = nnAs.OfType + if !okT { + return false // nullable can not be used as non-null + } + } + + if t == as { + return true + } + + if lT, ok := t.(*types.List); ok { + if lAs, ok := as.(*types.List); ok { + return typeCanBeUsedAs(lT.OfType, lAs.OfType) + } + } + return false +} diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection.go new file mode 100644 index 00000000..6877bcaf --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/introspection.go @@ -0,0 +1,118 @@ +package graphql + +import ( + "context" + "encoding/json" + + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/introspection" +) + +// Inspect allows inspection of the given schema. +func (s *Schema) Inspect() *introspection.Schema { + return introspection.WrapSchema(s.schema) +} + +// ToJSON encodes the schema in a JSON format used by tools like Relay. +func (s *Schema) ToJSON() ([]byte, error) { + result := s.exec(context.Background(), introspectionQuery, "", nil, &resolvable.Schema{ + Meta: s.res.Meta, + Query: &resolvable.Object{}, + Schema: *s.schema, + }) + if len(result.Errors) != 0 { + panic(result.Errors[0]) + } + return json.MarshalIndent(result.Data, "", "\t") +} + +var introspectionQuery = ` + query { + __schema { + queryType { name } + mutationType { name } + subscriptionType { name } + types { + ...FullType + } + directives { + name + description + locations + args { + ...InputValue + } + } + } + } + fragment FullType on __Type { + kind + name + description + fields(includeDeprecated: true) { + name + description + args { + ...InputValue + } + type { + ...TypeRef + } + isDeprecated + deprecationReason + } + inputFields { + ...InputValue + } + interfaces { + ...TypeRef + } + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + possibleTypes { + ...TypeRef + } + } + fragment InputValue on __InputValue { + name + description + type { ...TypeRef } + defaultValue + } + fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } + } +` diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go new file mode 100644 index 00000000..a0a2fa9b --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go @@ -0,0 +1,312 @@ +package introspection + +import ( + "sort" + + "github.com/graph-gophers/graphql-go/types" +) + +type Schema struct { + schema *types.Schema +} + +// WrapSchema is only used internally. 
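Downstream code reaches this package through Schema.Inspect and Schema.ToJSON (defined in introspection.go above); as a hedged sketch of that path, where schemaString and rootResolver are placeholders for whatever the host application defines:

	s := graphql.MustParseSchema(schemaString, &rootResolver{})
	relayJSON, err := s.ToJSON() // full introspection result for tools like Relay
	if err != nil {
		// per the implementation above, query errors panic inside ToJSON,
		// so err here only reports a JSON encoding failure
		log.Fatal(err)
	}
	os.Stdout.Write(relayJSON)
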
+func WrapSchema(schema *types.Schema) *Schema { + return &Schema{schema} +} + +func (r *Schema) Types() []*Type { + var names []string + for name := range r.schema.Types { + names = append(names, name) + } + sort.Strings(names) + + l := make([]*Type, len(names)) + for i, name := range names { + l[i] = &Type{r.schema.Types[name]} + } + return l +} + +func (r *Schema) Directives() []*Directive { + var names []string + for name := range r.schema.Directives { + names = append(names, name) + } + sort.Strings(names) + + l := make([]*Directive, len(names)) + for i, name := range names { + l[i] = &Directive{r.schema.Directives[name]} + } + return l +} + +func (r *Schema) QueryType() *Type { + t, ok := r.schema.EntryPoints["query"] + if !ok { + return nil + } + return &Type{t} +} + +func (r *Schema) MutationType() *Type { + t, ok := r.schema.EntryPoints["mutation"] + if !ok { + return nil + } + return &Type{t} +} + +func (r *Schema) SubscriptionType() *Type { + t, ok := r.schema.EntryPoints["subscription"] + if !ok { + return nil + } + return &Type{t} +} + +type Type struct { + typ types.Type +} + +// WrapType is only used internally. +func WrapType(typ types.Type) *Type { + return &Type{typ} +} + +func (r *Type) Kind() string { + return r.typ.Kind() +} + +func (r *Type) Name() *string { + if named, ok := r.typ.(types.NamedType); ok { + name := named.TypeName() + return &name + } + return nil +} + +func (r *Type) Description() *string { + if named, ok := r.typ.(types.NamedType); ok { + desc := named.Description() + if desc == "" { + return nil + } + return &desc + } + return nil +} + +func (r *Type) Fields(args *struct{ IncludeDeprecated bool }) *[]*Field { + var fields types.FieldsDefinition + switch t := r.typ.(type) { + case *types.ObjectTypeDefinition: + fields = t.Fields + case *types.InterfaceTypeDefinition: + fields = t.Fields + default: + return nil + } + + var l []*Field + for _, f := range fields { + if d := f.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated { + l = append(l, &Field{field: f}) + } + } + return &l +} + +func (r *Type) Interfaces() *[]*Type { + t, ok := r.typ.(*types.ObjectTypeDefinition) + if !ok { + return nil + } + + l := make([]*Type, len(t.Interfaces)) + for i, intf := range t.Interfaces { + l[i] = &Type{intf} + } + return &l +} + +func (r *Type) PossibleTypes() *[]*Type { + var possibleTypes []*types.ObjectTypeDefinition + switch t := r.typ.(type) { + case *types.InterfaceTypeDefinition: + possibleTypes = t.PossibleTypes + case *types.Union: + possibleTypes = t.UnionMemberTypes + default: + return nil + } + + l := make([]*Type, len(possibleTypes)) + for i, intf := range possibleTypes { + l[i] = &Type{intf} + } + return &l +} + +func (r *Type) EnumValues(args *struct{ IncludeDeprecated bool }) *[]*EnumValue { + t, ok := r.typ.(*types.EnumTypeDefinition) + if !ok { + return nil + } + + var l []*EnumValue + for _, v := range t.EnumValuesDefinition { + if d := v.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated { + l = append(l, &EnumValue{v}) + } + } + return &l +} + +func (r *Type) InputFields() *[]*InputValue { + t, ok := r.typ.(*types.InputObject) + if !ok { + return nil + } + + l := make([]*InputValue, len(t.Values)) + for i, v := range t.Values { + l[i] = &InputValue{v} + } + return &l +} + +func (r *Type) OfType() *Type { + switch t := r.typ.(type) { + case *types.List: + return &Type{t.OfType} + case *types.NonNull: + return &Type{t.OfType} + default: + return nil + } +} + +type Field struct { + field *types.FieldDefinition +} + 
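To show how the wrapper types above compose, a minimal sketch that lists every named type and the query root (s is a *graphql.Schema parsed elsewhere; the output format is illustrative):

	insp := s.Inspect()
	for _, t := range insp.Types() {
		if name := t.Name(); name != nil { // Name() is nil only for wrappers such as List/NonNull
			fmt.Printf("%-12s %s\n", t.Kind(), *name)
		}
	}
	if qt := insp.QueryType(); qt != nil && qt.Name() != nil {
		fmt.Println("query root:", *qt.Name())
	}
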
+func (r *Field) Name() string { + return r.field.Name +} + +func (r *Field) Description() *string { + if r.field.Desc == "" { + return nil + } + return &r.field.Desc +} + +func (r *Field) Args() []*InputValue { + l := make([]*InputValue, len(r.field.Arguments)) + for i, v := range r.field.Arguments { + l[i] = &InputValue{v} + } + return l +} + +func (r *Field) Type() *Type { + return &Type{r.field.Type} +} + +func (r *Field) IsDeprecated() bool { + return r.field.Directives.Get("deprecated") != nil +} + +func (r *Field) DeprecationReason() *string { + d := r.field.Directives.Get("deprecated") + if d == nil { + return nil + } + reason := d.Arguments.MustGet("reason").Deserialize(nil).(string) + return &reason +} + +type InputValue struct { + value *types.InputValueDefinition +} + +func (r *InputValue) Name() string { + return r.value.Name.Name +} + +func (r *InputValue) Description() *string { + if r.value.Desc == "" { + return nil + } + return &r.value.Desc +} + +func (r *InputValue) Type() *Type { + return &Type{r.value.Type} +} + +func (r *InputValue) DefaultValue() *string { + if r.value.Default == nil { + return nil + } + s := r.value.Default.String() + return &s +} + +type EnumValue struct { + value *types.EnumValueDefinition +} + +func (r *EnumValue) Name() string { + return r.value.EnumValue +} + +func (r *EnumValue) Description() *string { + if r.value.Desc == "" { + return nil + } + return &r.value.Desc +} + +func (r *EnumValue) IsDeprecated() bool { + return r.value.Directives.Get("deprecated") != nil +} + +func (r *EnumValue) DeprecationReason() *string { + d := r.value.Directives.Get("deprecated") + if d == nil { + return nil + } + reason := d.Arguments.MustGet("reason").Deserialize(nil).(string) + return &reason +} + +type Directive struct { + directive *types.DirectiveDefinition +} + +func (r *Directive) Name() string { + return r.directive.Name +} + +func (r *Directive) Description() *string { + if r.directive.Desc == "" { + return nil + } + return &r.directive.Desc +} + +func (r *Directive) Locations() []string { + return r.directive.Locations +} + +func (r *Directive) Args() []*InputValue { + l := make([]*InputValue, len(r.directive.Arguments)) + for i, v := range r.directive.Arguments { + l[i] = &InputValue{v} + } + return l +} diff --git a/vendor/github.com/graph-gophers/graphql-go/log/log.go b/vendor/github.com/graph-gophers/graphql-go/log/log.go new file mode 100644 index 00000000..bdada874 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/log/log.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + "log" + "runtime" +) + +// Logger is the interface used to log panics that occur during query execution. 
It is settable via graphql.ParseSchema +type Logger interface { + LogPanic(ctx context.Context, value interface{}) +} + +// DefaultLogger is the default logger used to log panics that occur during query execution +type DefaultLogger struct{} + +// LogPanic is used to log recovered panic values that occur during query execution +func (l *DefaultLogger) LogPanic(ctx context.Context, value interface{}) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + log.Printf("graphql: panic occurred: %v\n%s\ncontext: %v", value, buf, ctx) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/nullable_types.go b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go new file mode 100644 index 00000000..fa5bbfd6 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go @@ -0,0 +1,166 @@ +package graphql + +import ( + "fmt" + "math" +) + +// NullString is a string that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullString struct { + Value *string + Set bool +} + +func (NullString) ImplementsGraphQLType(name string) bool { + return name == "String" +} + +func (s *NullString) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case string: + s.Value = &v + return nil + default: + return fmt.Errorf("wrong type for String: %T", v) + } +} + +func (s *NullString) Nullable() {} + +// NullBool is a Boolean that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullBool struct { + Value *bool + Set bool +} + +func (NullBool) ImplementsGraphQLType(name string) bool { + return name == "Boolean" +} + +func (s *NullBool) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case bool: + s.Value = &v + return nil + default: + return fmt.Errorf("wrong type for Boolean: %T", v) + } +} + +func (s *NullBool) Nullable() {} + +// NullInt is an Int that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullInt struct { + Value *int32 + Set bool +} + +func (NullInt) ImplementsGraphQLType(name string) bool { + return name == "Int" +} + +func (s *NullInt) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case int32: + s.Value = &v + return nil + case float64: + coerced := int32(v) + if v < math.MinInt32 || v > math.MaxInt32 || float64(coerced) != v { + return fmt.Errorf("not a 32-bit integer") + } + s.Value = &coerced + return nil + default: + return fmt.Errorf("wrong type for Int: %T", v) + } +} + +func (s *NullInt) Nullable() {}
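To make the tri-state behavior concrete, a minimal sketch of an input struct on a hypothetical mutation resolver (all names below are illustrative and not part of this package):

	type updateProfileArgs struct {
		Bio graphql.NullString
	}

	func (r *rootResolver) UpdateProfile(args updateProfileArgs) bool {
		switch {
		case !args.Bio.Set:
			// "bio" was omitted entirely: leave the stored value as-is
		case args.Bio.Value == nil:
			// the client sent an explicit null: clear the stored value
		default:
			// the client sent a concrete string: use *args.Bio.Value
		}
		return true
	}
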
+// NullFloat is a Float that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullFloat struct { + Value *float64 + Set bool +} + +func (NullFloat) ImplementsGraphQLType(name string) bool { + return name == "Float" +} + +func (s *NullFloat) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case float64: + s.Value = &v + return nil + case int32: + coerced := float64(v) + s.Value = &coerced + return nil + case int: + coerced := float64(v) + s.Value = &coerced + return nil + default: + return fmt.Errorf("wrong type for Float: %T", v) + } +} + +func (s *NullFloat) Nullable() {} + +// NullTime is a Time that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullTime struct { + Value *Time + Set bool +} + +func (NullTime) ImplementsGraphQLType(name string) bool { + return name == "Time" +} + +func (s *NullTime) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + s.Value = new(Time) + return s.Value.UnmarshalGraphQL(input) +} + +func (s *NullTime) Nullable() {} diff --git a/vendor/github.com/graph-gophers/graphql-go/subscriptions.go b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go new file mode 100644 index 00000000..34064dc7 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go @@ -0,0 +1,96 @@ +package graphql + +import ( + "context" + "errors" + + qerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/internal/exec" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/internal/validation" + "github.com/graph-gophers/graphql-go/introspection" +) + +// Subscribe returns a response channel for the given subscription with the schema's +// resolver. It returns an error if the schema was created without a resolver. +// If the context gets cancelled, the response channel will be closed and no +// further resolvers will be called. The context error will be returned as soon +// as possible (not immediately).
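Before the implementation, a hedged consumption sketch: the subscription document, the payload assertion, and handleUpdate below are assumptions about typical caller code, not part of this file. The internal sender (see the goroutine below) delivers *Response values on the channel.

	ch, err := schema.Subscribe(ctx, `subscription { messageAdded { body } }`, "", nil)
	if err != nil {
		return err
	}
	for payload := range ch {
		resp, ok := payload.(*graphql.Response)
		if !ok {
			continue
		}
		handleUpdate(resp.Data, resp.Errors) // handleUpdate is hypothetical
	}
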
+func (s *Schema) Subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) (<-chan interface{}, error) { + if !s.res.Resolver.IsValid() { + return nil, errors.New("schema created without resolver, can not subscribe") + } + if _, ok := s.schema.EntryPoints["subscription"]; !ok { + return nil, errors.New("no subscriptions are offered by the schema") + } + return s.subscribe(ctx, queryString, operationName, variables, s.res), nil +} + +func (s *Schema) subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) <-chan interface{} { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qErr}}) + } + + validationFinish := s.validationTracer.TraceValidation(ctx) + errs := validation.Validate(s.schema, doc, variables, s.maxDepth) + validationFinish(errs) + if len(errs) != 0 { + return sendAndReturnClosed(&Response{Errors: errs}) + } + + op, err := getOperation(doc, operationName) + if err != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qerrors.Errorf("%s", err)}}) + } + + r := &exec.Request{ + Request: selected.Request{ + Doc: doc, + Vars: variables, + Schema: s.schema, + }, + Limiter: make(chan struct{}, s.maxParallelism), + Tracer: s.tracer, + Logger: s.logger, + PanicHandler: s.panicHandler, + SubscribeResolverTimeout: s.subscribeResolverTimeout, + } + varTypes := make(map[string]*introspection.Type) + for _, v := range op.Vars { + t, err := common.ResolveType(v.Type, s.schema.Resolve) + if err != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{err}}) + } + varTypes[v.Name.Name] = introspection.WrapType(t) + } + + if op.Type == query.Query || op.Type == query.Mutation { + data, errs := r.Execute(ctx, res, op) + return sendAndReturnClosed(&Response{Data: data, Errors: errs}) + } + + responses := r.Subscribe(ctx, res, op) + c := make(chan interface{}) + go func() { + for resp := range responses { + c <- &Response{ + Data: resp.Data, + Errors: resp.Errors, + } + } + close(c) + }() + + return c +} + +func sendAndReturnClosed(resp *Response) chan interface{} { + c := make(chan interface{}, 1) + c <- resp + close(c) + return c +} diff --git a/vendor/github.com/graph-gophers/graphql-go/time.go b/vendor/github.com/graph-gophers/graphql-go/time.go new file mode 100644 index 00000000..974287e7 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/time.go @@ -0,0 +1,64 @@ +package graphql + +import ( + "encoding/json" + "fmt" + "time" +) + +// Time is a custom GraphQL type to represent an instant in time. It has to be added to a schema +// via "scalar Time" since it is not a predeclared GraphQL type like "ID". +type Time struct { + time.Time +} + +// ImplementsGraphQLType maps this custom Go type +// to the graphql scalar type in the schema. 
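For example, a schema opts in with "scalar Time" and resolvers can then return this type directly; a minimal sketch (resolver and field names are illustrative):

	const schemaString = `
		scalar Time
		type Query {
			now: Time!
		}
	`

	type rootResolver struct{}

	func (*rootResolver) Now() graphql.Time {
		return graphql.Time{Time: time.Now()}
	}
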
+func (Time) ImplementsGraphQLType(name string) bool { + return name == "Time" +} + +// UnmarshalGraphQL is a custom unmarshaler for Time +// +// This function will be called whenever you use the +// time scalar as an input +func (t *Time) UnmarshalGraphQL(input interface{}) error { + switch input := input.(type) { + case time.Time: + t.Time = input + return nil + case string: + var err error + t.Time, err = time.Parse(time.RFC3339, input) + return err + case []byte: + var err error + t.Time, err = time.Parse(time.RFC3339, string(input)) + return err + case int32: + t.Time = time.Unix(int64(input), 0) + return nil + case int64: + if input >= 1e10 { + sec := input / 1e9 + nsec := input - (sec * 1e9) + t.Time = time.Unix(sec, nsec) + } else { + t.Time = time.Unix(input, 0) + } + return nil + case float64: + t.Time = time.Unix(int64(input), 0) + return nil + default: + return fmt.Errorf("wrong type for Time: %T", input) + } +} + +// MarshalJSON is a custom marshaler for Time +// +// This function will be called whenever you +// query for fields that use the Time type +func (t Time) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go new file mode 100644 index 00000000..8d5d8a71 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go @@ -0,0 +1,96 @@ +package trace + +import ( + "context" + "fmt" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/introspection" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +type TraceQueryFinishFunc func([]*errors.QueryError) +type TraceFieldFinishFunc func(*errors.QueryError) + +type Tracer interface { + TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) + TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) +} + +type OpenTracingTracer struct{} + +func (OpenTracingTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) { + span, spanCtx := opentracing.StartSpanFromContext(ctx, "GraphQL request") + span.SetTag("graphql.query", queryString) + + if operationName != "" { + span.SetTag("graphql.operationName", operationName) + } + + if len(variables) != 0 { + span.LogFields(log.Object("graphql.variables", variables)) + } + + return spanCtx, func(errs []*errors.QueryError) { + if len(errs) > 0 { + msg := errs[0].Error() + if len(errs) > 1 { + msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1) + } + ext.Error.Set(span, true) + span.SetTag("graphql.error", msg) + } + span.Finish() + } +} + +func (OpenTracingTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) { + if trivial { + return ctx, noop + } + + span, spanCtx := opentracing.StartSpanFromContext(ctx, label) + span.SetTag("graphql.type", typeName) + span.SetTag("graphql.field", fieldName) + for name, value := range args { + span.SetTag("graphql.args."+name, value) + } + + return spanCtx, func(err *errors.QueryError) { + if err != 
nil { + ext.Error.Set(span, true) + span.SetTag("graphql.error", err.Error()) + } + span.Finish() + } +} + +func (OpenTracingTracer) TraceValidation(ctx context.Context) TraceValidationFinishFunc { + span, _ := opentracing.StartSpanFromContext(ctx, "Validate Query") + + return func(errs []*errors.QueryError) { + if len(errs) > 0 { + msg := errs[0].Error() + if len(errs) > 1 { + msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1) + } + ext.Error.Set(span, true) + span.SetTag("graphql.error", msg) + } + span.Finish() + } +} + +func noop(*errors.QueryError) {} + +type NoopTracer struct{} + +func (NoopTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) { + return ctx, func(errs []*errors.QueryError) {} +} + +func (NoopTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) { + return ctx, func(err *errors.QueryError) {} +} diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go new file mode 100644 index 00000000..bce7a9a4 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go @@ -0,0 +1,25 @@ +package trace + +import ( + "context" + + "github.com/graph-gophers/graphql-go/errors" +) + +type TraceValidationFinishFunc = TraceQueryFinishFunc + +// Deprecated: use ValidationTracerContext. +type ValidationTracer interface { + TraceValidation() TraceValidationFinishFunc +} + +type ValidationTracerContext interface { + TraceValidation(ctx context.Context) TraceValidationFinishFunc +} + +type NoopValidationTracer struct{} + +// Deprecated: use a Tracer which implements ValidationTracerContext. +func (NoopValidationTracer) TraceValidation() TraceValidationFinishFunc { + return func(errs []*errors.QueryError) {} +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/argument.go b/vendor/github.com/graph-gophers/graphql-go/types/argument.go new file mode 100644 index 00000000..b2681a28 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/argument.go @@ -0,0 +1,44 @@ +package types + +// Argument is a representation of the GraphQL Argument. +// +// https://spec.graphql.org/draft/#sec-Language.Arguments +type Argument struct { + Name Ident + Value Value +} + +// ArgumentList is a collection of GraphQL Arguments. +type ArgumentList []*Argument + +// Returns a Value in the ArgumentList by name. +func (l ArgumentList) Get(name string) (Value, bool) { + for _, arg := range l { + if arg.Name.Name == name { + return arg.Value, true + } + } + return nil, false +} + +// MustGet returns a Value in the ArgumentList by name. +// MustGet will panic if the argument name is not found in the ArgumentList. +func (l ArgumentList) MustGet(name string) Value { + value, ok := l.Get(name) + if !ok { + panic("argument not found") + } + return value +} + +type ArgumentsDefinition []*InputValueDefinition + +// Get returns an InputValueDefinition in the ArgumentsDefinition by name or nil if not found. 
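These lookups pair with DirectiveList.Get (defined below in directive.go); the introspection resolvers earlier in this diff read a deprecation reason exactly this way, and MustGet is safe there because resolveDirectives fills in declared argument defaults:

	// f is some *types.FieldDefinition obtained elsewhere
	if d := f.Directives.Get("deprecated"); d != nil {
		reason := d.Arguments.MustGet("reason").Deserialize(nil).(string)
		_ = reason
	}
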
+func (a ArgumentsDefinition) Get(name string) *InputValueDefinition { + for _, inputValue := range a { + if inputValue.Name.Name == name { + return inputValue + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/directive.go b/vendor/github.com/graph-gophers/graphql-go/types/directive.go new file mode 100644 index 00000000..0f8a4b99 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/directive.go @@ -0,0 +1,34 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Directive is a representation of the GraphQL Directive. +// +// http://spec.graphql.org/draft/#sec-Language.Directives +type Directive struct { + Name Ident + Arguments ArgumentList +} + +// DirectiveDefinition is a representation of the GraphQL DirectiveDefinition. +// +// http://spec.graphql.org/draft/#sec-Type-System.Directives +type DirectiveDefinition struct { + Name string + Desc string + Locations []string + Arguments ArgumentsDefinition + Loc errors.Location +} + +type DirectiveList []*Directive + +// Returns the Directive in the DirectiveList by name or nil if not found. +func (l DirectiveList) Get(name string) *Directive { + for _, d := range l { + if d.Name.Name == name { + return d + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/doc.go b/vendor/github.com/graph-gophers/graphql-go/types/doc.go new file mode 100644 index 00000000..87caa60b --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/doc.go @@ -0,0 +1,9 @@ +/* + Package types represents all types from the GraphQL specification in code. + + + The names of the Go types, whenever possible, match 1:1 with the names from + the specification. + +*/ +package types diff --git a/vendor/github.com/graph-gophers/graphql-go/types/enum.go b/vendor/github.com/graph-gophers/graphql-go/types/enum.go new file mode 100644 index 00000000..b2c84caa --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/enum.go @@ -0,0 +1,32 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// EnumTypeDefinition defines a set of possible enum values. +// +// Like scalar types, an EnumTypeDefinition also represents a leaf value in a GraphQL type system. +// +// http://spec.graphql.org/draft/#sec-Enums +type EnumTypeDefinition struct { + Name string + EnumValuesDefinition []*EnumValueDefinition + Desc string + Directives DirectiveList + Loc errors.Location +} + +// EnumValueDefinition are unique values that may be serialized as a string: the name of the +// represented value. +// +// http://spec.graphql.org/draft/#EnumValueDefinition +type EnumValueDefinition struct { + EnumValue string + Directives DirectiveList + Desc string + Loc errors.Location +} + +func (*EnumTypeDefinition) Kind() string { return "ENUM" } +func (t *EnumTypeDefinition) String() string { return t.Name } +func (t *EnumTypeDefinition) TypeName() string { return t.Name } +func (t *EnumTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/extension.go b/vendor/github.com/graph-gophers/graphql-go/types/extension.go new file mode 100644 index 00000000..b82ea670 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/extension.go @@ -0,0 +1,13 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Extension type defines a GraphQL type extension. +// Schemas, Objects, Inputs and Scalars can be extended. 
+// +// https://spec.graphql.org/draft/#sec-Type-System-Extensions +type Extension struct { + Type NamedType + Directives DirectiveList + Loc errors.Location +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/field.go b/vendor/github.com/graph-gophers/graphql-go/types/field.go new file mode 100644 index 00000000..ea5bca5c --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/field.go @@ -0,0 +1,39 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// FieldDefinition is a representation of a GraphQL FieldDefinition. +// +// http://spec.graphql.org/draft/#FieldDefinition +type FieldDefinition struct { + Name string + Arguments ArgumentsDefinition + Type Type + Directives DirectiveList + Desc string + Loc errors.Location +} + +// FieldsDefinition is a list of an ObjectTypeDefinition's Fields. +// +// https://spec.graphql.org/draft/#FieldsDefinition +type FieldsDefinition []*FieldDefinition + +// Get returns a FieldDefinition in a FieldsDefinition by name or nil if not found. +func (l FieldsDefinition) Get(name string) *FieldDefinition { + for _, f := range l { + if f.Name == name { + return f + } + } + return nil +} + +// Names returns a slice of FieldDefinition names. +func (l FieldsDefinition) Names() []string { + names := make([]string, len(l)) + for i, f := range l { + names[i] = f.Name + } + return names +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/fragment.go b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go new file mode 100644 index 00000000..606219ca --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go @@ -0,0 +1,51 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +type Fragment struct { + On TypeName + Selections SelectionSet +} + +// InlineFragment is a representation of the GraphQL InlineFragment. +// +// http://spec.graphql.org/draft/#InlineFragment +type InlineFragment struct { + Fragment + Directives DirectiveList + Loc errors.Location +} + +// FragmentDefinition is a representation of the GraphQL FragmentDefinition. +// +// http://spec.graphql.org/draft/#FragmentDefinition +type FragmentDefinition struct { + Fragment + Name Ident + Directives DirectiveList + Loc errors.Location +} + +// FragmentSpread is a representation of the GraphQL FragmentSpread. +// +// http://spec.graphql.org/draft/#FragmentSpread +type FragmentSpread struct { + Name Ident + Directives DirectiveList + Loc errors.Location +} + +type FragmentList []*FragmentDefinition + +// Returns a FragmentDefinition by name or nil if not found. +func (l FragmentList) Get(name string) *FragmentDefinition { + for _, f := range l { + if f.Name.Name == name { + return f + } + } + return nil +} + +func (InlineFragment) isSelection() {} +func (FragmentSpread) isSelection() {} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/input.go b/vendor/github.com/graph-gophers/graphql-go/types/input.go new file mode 100644 index 00000000..c179bc3e --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/input.go @@ -0,0 +1,47 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// InputValueDefinition is a representation of the GraphQL InputValueDefinition. 
+// +// http://spec.graphql.org/draft/#InputValueDefinition +type InputValueDefinition struct { + Name Ident + Type Type + Default Value + Desc string + Directives DirectiveList + Loc errors.Location + TypeLoc errors.Location +} + +type InputValueDefinitionList []*InputValueDefinition + +// Returns an InputValueDefinition by name or nil if not found. +func (l InputValueDefinitionList) Get(name string) *InputValueDefinition { + for _, v := range l { + if v.Name.Name == name { + return v + } + } + return nil +} + +// InputObject types define a set of input fields; the input fields are either scalars, enums, or +// other input objects. +// +// This allows arguments to accept arbitrarily complex structs. +// +// http://spec.graphql.org/draft/#sec-Input-Objects +type InputObject struct { + Name string + Desc string + Values ArgumentsDefinition + Directives DirectiveList + Loc errors.Location +} + +func (*InputObject) Kind() string { return "INPUT_OBJECT" } +func (t *InputObject) String() string { return t.Name } +func (t *InputObject) TypeName() string { return t.Name } +func (t *InputObject) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/interface.go b/vendor/github.com/graph-gophers/graphql-go/types/interface.go new file mode 100644 index 00000000..e741e591 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/interface.go @@ -0,0 +1,25 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// InterfaceTypeDefinition recursively defines a list of named fields with their arguments via the +// implementation chain of interfaces. + +// +// GraphQL objects can then implement these interfaces, which requires that the object type +// define all fields defined by those interfaces. +// +// http://spec.graphql.org/draft/#sec-Interfaces +type InterfaceTypeDefinition struct { + Name string + PossibleTypes []*ObjectTypeDefinition + Fields FieldsDefinition + Desc string + Directives DirectiveList + Loc errors.Location + Interfaces []*InterfaceTypeDefinition +} + +func (*InterfaceTypeDefinition) Kind() string { return "INTERFACE" } +func (t *InterfaceTypeDefinition) String() string { return t.Name } +func (t *InterfaceTypeDefinition) TypeName() string { return t.Name } +func (t *InterfaceTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/object.go b/vendor/github.com/graph-gophers/graphql-go/types/object.go new file mode 100644 index 00000000..e65c79db --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/object.go @@ -0,0 +1,25 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ObjectTypeDefinition represents a GraphQL ObjectTypeDefinition.
+// +// type FooObject { +// foo: String +// } +// +// https://spec.graphql.org/draft/#sec-Objects +type ObjectTypeDefinition struct { + Name string + Interfaces []*InterfaceTypeDefinition + Fields FieldsDefinition + Desc string + Directives DirectiveList + InterfaceNames []string + Loc errors.Location +} + +func (*ObjectTypeDefinition) Kind() string { return "OBJECT" } +func (t *ObjectTypeDefinition) String() string { return t.Name } +func (t *ObjectTypeDefinition) TypeName() string { return t.Name } +func (t *ObjectTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/query.go b/vendor/github.com/graph-gophers/graphql-go/types/query.go new file mode 100644 index 00000000..caca6ef4 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/query.go @@ -0,0 +1,62 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ExecutableDefinition represents a set of operations or fragments that can be executed +// against a schema. +// +// http://spec.graphql.org/draft/#ExecutableDefinition +type ExecutableDefinition struct { + Operations OperationList + Fragments FragmentList +} + +// OperationDefinition represents a GraphQL Operation. +// +// https://spec.graphql.org/draft/#sec-Language.Operations +type OperationDefinition struct { + Type OperationType + Name Ident + Vars ArgumentsDefinition + Selections SelectionSet + Directives DirectiveList + Loc errors.Location +} + +type OperationType string + +// A Selection is a field requested in a GraphQL operation. +// +// http://spec.graphql.org/draft/#Selection +type Selection interface { + isSelection() +} + +// A SelectionSet represents a collection of Selections +// +// http://spec.graphql.org/draft/#sec-Selection-Sets +type SelectionSet []Selection + +// Field represents a field used in a query. +type Field struct { + Alias Ident + Name Ident + Arguments ArgumentList + Directives DirectiveList + SelectionSet SelectionSet + SelectionSetLoc errors.Location +} + +func (Field) isSelection() {} + +type OperationList []*OperationDefinition + +// Get returns an OperationDefinition by name or nil if not found. +func (l OperationList) Get(name string) *OperationDefinition { + for _, f := range l { + if f.Name.Name == name { + return f + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/scalar.go b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go new file mode 100644 index 00000000..5bd529a8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go @@ -0,0 +1,22 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ScalarTypeDefinition types represent primitive leaf values (e.g. a string or an integer) in a GraphQL type +// system. +// +// GraphQL responses take the form of a hierarchical tree; the leaves on these trees are GraphQL +// scalars. 
+// +// http://spec.graphql.org/draft/#sec-Scalars +type ScalarTypeDefinition struct { + Name string + Desc string + Directives DirectiveList + Loc errors.Location +} + +func (*ScalarTypeDefinition) Kind() string { return "SCALAR" } +func (t *ScalarTypeDefinition) String() string { return t.Name } +func (t *ScalarTypeDefinition) TypeName() string { return t.Name } +func (t *ScalarTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/schema.go b/vendor/github.com/graph-gophers/graphql-go/types/schema.go new file mode 100644 index 00000000..06811a97 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/schema.go @@ -0,0 +1,42 @@ +package types + +// Schema represents a GraphQL service's collective type system capabilities. +// A schema is defined in terms of the types and directives it supports as well as the root +// operation types for each kind of operation: `query`, `mutation`, and `subscription`. +// +// For a more formal definition, read the relevant section in the specification: +// +// http://spec.graphql.org/draft/#sec-Schema +type Schema struct { + // EntryPoints determines the place in the type system where `query`, `mutation`, and + // `subscription` operations begin. + // + // http://spec.graphql.org/draft/#sec-Root-Operation-Types + // + EntryPoints map[string]NamedType + + // Types are the fundamental unit of any GraphQL schema. + // There are six kinds of named types, and two wrapping types. + // + // http://spec.graphql.org/draft/#sec-Types + Types map[string]NamedType + + // Directives are used to annotate various parts of a GraphQL document as an indicator that they + // should be evaluated differently by a validator, executor, or client tool such as a code + // generator. + // + // http://spec.graphql.org/#sec-Type-System.Directives + Directives map[string]*DirectiveDefinition + + UseFieldResolvers bool + + EntryPointNames map[string]string + Objects []*ObjectTypeDefinition + Unions []*Union + Enums []*EnumTypeDefinition + Extensions []*Extension +} + +func (s *Schema) Resolve(name string) Type { + return s.Types[name] +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/types.go b/vendor/github.com/graph-gophers/graphql-go/types/types.go new file mode 100644 index 00000000..df34d08a --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/types.go @@ -0,0 +1,63 @@ +package types + +import ( + "github.com/graph-gophers/graphql-go/errors" +) + +// TypeName is a base building block for GraphQL type references. +type TypeName struct { + Ident +} + +// NamedType represents a type with a name. +// +// http://spec.graphql.org/draft/#NamedType +type NamedType interface { + Type + TypeName() string + Description() string +} + +type Ident struct { + Name string + Loc errors.Location +} + +type Type interface { + // Kind returns one possible GraphQL type kind. A type kind must be + // valid as defined by the GraphQL spec. + // + // https://spec.graphql.org/draft/#sec-Type-Kinds + Kind() string + + // String serializes a Type into a GraphQL specification format type. + // + // http://spec.graphql.org/draft/#sec-Serialization-Format + String() string +} + +// List represents a GraphQL ListType. +// +// http://spec.graphql.org/draft/#ListType +type List struct { + // OfType represents the inner-type of a List type. + // For example, the List type `[Foo]` has an OfType of Foo. + OfType Type +} + +// NonNull represents a GraphQL NonNullType. 
+// +// https://spec.graphql.org/draft/#NonNullType +type NonNull struct { + // OfType represents the inner-type of a NonNull type. + // For example, the NonNull type `Foo!` has an OfType of Foo. + OfType Type +} + +func (*List) Kind() string { return "LIST" } +func (*NonNull) Kind() string { return "NON_NULL" } +func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") } + +func (t *List) String() string { return "[" + t.OfType.String() + "]" } +func (t *NonNull) String() string { return t.OfType.String() + "!" } +func (*TypeName) String() string { panic("TypeName needs to be resolved to actual type") } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/union.go b/vendor/github.com/graph-gophers/graphql-go/types/union.go new file mode 100644 index 00000000..bb916673 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/union.go @@ -0,0 +1,24 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Union types represent objects that could be one of a list of GraphQL object types, but provide no +// guaranteed fields between those types. +// +// They also differ from interfaces in that object types declare what interfaces they implement, but +// are not aware of which unions contain them. +// +// http://spec.graphql.org/draft/#sec-Unions +type Union struct { + Name string + UnionMemberTypes []*ObjectTypeDefinition + Desc string + Directives DirectiveList + TypeNames []string + Loc errors.Location +} + +func (*Union) Kind() string { return "UNION" } +func (t *Union) String() string { return t.Name } +func (t *Union) TypeName() string { return t.Name } +func (t *Union) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/value.go b/vendor/github.com/graph-gophers/graphql-go/types/value.go new file mode 100644 index 00000000..9f8d041a --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/value.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" +) + +// Value represents a literal input or literal default value in the GraphQL Specification. +// +// http://spec.graphql.org/draft/#sec-Input-Values +type Value interface { + // Deserialize transforms a GraphQL specification format literal into a Go type. + Deserialize(vars map[string]interface{}) interface{} + + // String serializes a Value into a GraphQL specification format literal.
+ String() string + Location() errors.Location +} + +// PrimitiveValue represents one of the following GraphQL scalars: Int, Float, +// String, or Boolean +type PrimitiveValue struct { + Type rune + Text string + Loc errors.Location +} + +func (val *PrimitiveValue) Deserialize(vars map[string]interface{}) interface{} { + switch val.Type { + case scanner.Int: + value, err := strconv.ParseInt(val.Text, 10, 32) + if err != nil { + panic(err) + } + return int32(value) + + case scanner.Float: + value, err := strconv.ParseFloat(val.Text, 64) + if err != nil { + panic(err) + } + return value + + case scanner.String: + value, err := strconv.Unquote(val.Text) + if err != nil { + panic(err) + } + return value + + case scanner.Ident: + switch val.Text { + case "true": + return true + case "false": + return false + default: + return val.Text + } + + default: + panic("invalid literal value") + } +} + +func (val *PrimitiveValue) String() string { return val.Text } +func (val *PrimitiveValue) Location() errors.Location { return val.Loc } + +// ListValue represents a literal list Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-List-Value +type ListValue struct { + Values []Value + Loc errors.Location +} + +func (val *ListValue) Deserialize(vars map[string]interface{}) interface{} { + entries := make([]interface{}, len(val.Values)) + for i, entry := range val.Values { + entries[i] = entry.Deserialize(vars) + } + return entries +} + +func (val *ListValue) String() string { + entries := make([]string, len(val.Values)) + for i, entry := range val.Values { + entries[i] = entry.String() + } + return "[" + strings.Join(entries, ", ") + "]" +} + +func (val *ListValue) Location() errors.Location { return val.Loc } + +// ObjectValue represents a literal object Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-Object-Value +type ObjectValue struct { + Fields []*ObjectField + Loc errors.Location +} + +// ObjectField represents field/value pairs in a literal ObjectValue. +type ObjectField struct { + Name Ident + Value Value +} + +func (val *ObjectValue) Deserialize(vars map[string]interface{}) interface{} { + fields := make(map[string]interface{}, len(val.Fields)) + for _, f := range val.Fields { + fields[f.Name.Name] = f.Value.Deserialize(vars) + } + return fields +} + +func (val *ObjectValue) String() string { + entries := make([]string, 0, len(val.Fields)) + for _, f := range val.Fields { + entries = append(entries, f.Name.Name+": "+f.Value.String()) + } + return "{" + strings.Join(entries, ", ") + "}" +} + +func (val *ObjectValue) Location() errors.Location { + return val.Loc +} + +// NullValue represents a literal `null` Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-Null-Value +type NullValue struct { + Loc errors.Location +} + +func (val *NullValue) Deserialize(vars map[string]interface{}) interface{} { return nil } +func (val *NullValue) String() string { return "null" } +func (val *NullValue) Location() errors.Location { return val.Loc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/variable.go b/vendor/github.com/graph-gophers/graphql-go/types/variable.go new file mode 100644 index 00000000..1a4e2a51 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/variable.go @@ -0,0 +1,15 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Variable is used in GraphQL operations to parameterize an input value. 
+// +// http://spec.graphql.org/draft/#Variable +type Variable struct { + Name string + Loc errors.Location +} + +func (v Variable) Deserialize(vars map[string]interface{}) interface{} { return vars[v.Name] } +func (v Variable) String() string { return "$" + v.Name } +func (v *Variable) Location() errors.Location { return v.Loc } diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/vendor/github.com/hashicorp/errwrap/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md deleted file mode 100644 index 444df08f..00000000 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# errwrap - -`errwrap` is a package for Go that formalizes the pattern of wrapping errors -and checking if an error contains another error. - -There is a common pattern in Go of taking a returned `error` value and -then wrapping it (such as with `fmt.Errorf`) before returning it. The problem -with this pattern is that you completely lose the original `error` structure. - -Arguably the _correct_ approach is that you should make a custom structure -implementing the `error` interface, and have the original error as a field -on that structure, such [as this example](http://golang.org/pkg/os/#PathError). -This is a good approach, but you have to know the entire chain of possible -rewrapping that happens, when you might just care about one. 
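As a concrete reference for the pattern the removed README describes above, here is a minimal sketch of that custom-struct approach using only the standard library. Everything in this snippet, including the `OpenError` type, is an illustrative assumption, not part of errwrap or of this repository:

```go
// Sketch of the custom-struct wrapping approach, in the style of
// os.PathError. OpenError is a hypothetical, illustrative type.
package main

import (
	"errors"
	"fmt"
	"os"
)

// OpenError implements error while keeping the original error
// reachable as a field.
type OpenError struct {
	Path string
	Err  error
}

func (e *OpenError) Error() string { return "open " + e.Path + ": " + e.Err.Error() }
func (e *OpenError) Unwrap() error { return e.Err }

func main() {
	_, err := os.Open("/i/dont/exist")
	wrapped := &OpenError{Path: "/i/dont/exist", Err: err}

	// Because Unwrap exposes the chain, the wrapped error is still
	// inspectable with the standard library helpers.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```

Since Go 1.13, `errors.Is` and `errors.As` traverse `Unwrap` chains natively, which is part of why a vendored helper like errwrap can be dropped in a changeset such as this one.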
- -`errwrap` formalizes this pattern (it doesn't matter what approach you use -above) by giving a single interface for wrapping errors, checking if a specific -error is wrapped, and extracting that error. - -## Installation and Docs - -Install using `go get github.com/hashicorp/errwrap`. - -Full documentation is available at -http://godoc.org/github.com/hashicorp/errwrap - -## Usage - -#### Basic Usage - -Below is a very basic example of its usage: - -```go -// A function that always returns an error, but wraps it, like a real -// function might. -func tryOpen() error { - _, err := os.Open("/i/dont/exist") - if err != nil { - return errwrap.Wrapf("Doesn't exist: {{err}}", err) - } - - return nil -} - -func main() { - err := tryOpen() - - // We can use the Contains helpers to check if an error contains - // another error. It is safe to do this with a nil error, or with - // an error that doesn't even use the errwrap package. - if errwrap.Contains(err, "does not exist") { - // Do something - } - if errwrap.ContainsType(err, new(os.PathError)) { - // Do something - } - - // Or we can use the associated `Get` functions to just extract - // a specific error. This would return nil if that specific error doesn't - // exist. - perr := errwrap.GetType(err, new(os.PathError)) -} -``` - -#### Custom Types - -If you're already making custom types that properly wrap errors, then -you can get all the functionality of `errwraps.Contains` and such by -implementing the `Wrapper` interface with just one function. Example: - -```go -type AppError { - Code ErrorCode - Err error -} - -func (e *AppError) WrappedErrors() []error { - return []error{e.Err} -} -``` - -Now this works: - -```go -err := &AppError{Err: fmt.Errorf("an error")} -if errwrap.ContainsType(err, fmt.Errorf("")) { - // This will work! -} -``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go deleted file mode 100644 index 44e368e5..00000000 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ /dev/null @@ -1,178 +0,0 @@ -// Package errwrap implements methods to formalize error wrapping in Go. -// -// All of the top-level functions that take an `error` are built to be able -// to take any error, not just wrapped errors. This allows you to use errwrap -// without having to type-check and type-cast everywhere. -package errwrap - -import ( - "errors" - "reflect" - "strings" -) - -// WalkFunc is the callback called for Walk. -type WalkFunc func(error) - -// Wrapper is an interface that can be implemented by custom types to -// have all the Contains, Get, etc. functions in errwrap work. -// -// When Walk reaches a Wrapper, it will call the callback for every -// wrapped error in addition to the wrapper itself. Since all the top-level -// functions in errwrap use Walk, this means that all those functions work -// with your custom type. -type Wrapper interface { - WrappedErrors() []error -} - -// Wrap defines that outer wraps inner, returning an error type that -// can be cleanly used with the other methods in this package, such as -// Contains, GetAll, etc. -// -// This function won't modify the error message at all (the outer message -// will be used). -func Wrap(outer, inner error) error { - return &wrappedError{ - Outer: outer, - Inner: inner, - } -} - -// Wrapf wraps an error with a formatting message. This is similar to using -// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap -// errors, you should replace it with this. 
-// -// format is the format of the error message. The string '{{err}}' will -// be replaced with the original error message. -// -// Deprecated: Use fmt.Errorf() -func Wrapf(format string, err error) error { - outerMsg := "" - if err != nil { - outerMsg = err.Error() - } - - outer := errors.New(strings.Replace( - format, "{{err}}", outerMsg, -1)) - - return Wrap(outer, err) -} - -// Contains checks if the given error contains an error with the -// message msg. If err is not a wrapped error, this will always return -// false unless the error itself happens to match this msg. -func Contains(err error, msg string) bool { - return len(GetAll(err, msg)) > 0 -} - -// ContainsType checks if the given error contains an error with -// the same concrete type as v. If err is not a wrapped error, this will -// check the err itself. -func ContainsType(err error, v interface{}) bool { - return len(GetAllType(err, v)) > 0 -} - -// Get is the same as GetAll but returns the deepest matching error. -func Get(err error, msg string) error { - es := GetAll(err, msg) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetType is the same as GetAllType but returns the deepest matching error. -func GetType(err error, v interface{}) error { - es := GetAllType(err, v) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetAll gets all the errors that might be wrapped in err with the -// given message. The order of the errors is such that the outermost -// matching error (the most recent wrap) is index zero, and so on. -func GetAll(err error, msg string) []error { - var result []error - - Walk(err, func(err error) { - if err.Error() == msg { - result = append(result, err) - } - }) - - return result -} - -// GetAllType gets all the errors that are the same type as v. -// -// The order of the return value is the same as described in GetAll. -func GetAllType(err error, v interface{}) []error { - var result []error - - var search string - if v != nil { - search = reflect.TypeOf(v).String() - } - Walk(err, func(err error) { - var needle string - if err != nil { - needle = reflect.TypeOf(err).String() - } - - if needle == search { - result = append(result, err) - } - }) - - return result -} - -// Walk walks all the wrapped errors in err and calls the callback. If -// err isn't a wrapped error, this will be called once for err. If err -// is a wrapped error, the callback will be called for both the wrapper -// that implements error as well as the wrapped error itself. -func Walk(err error, cb WalkFunc) { - if err == nil { - return - } - - switch e := err.(type) { - case *wrappedError: - cb(e.Outer) - Walk(e.Inner, cb) - case Wrapper: - cb(err) - - for _, err := range e.WrappedErrors() { - Walk(err, cb) - } - case interface{ Unwrap() error }: - cb(err) - Walk(e.Unwrap(), cb) - default: - cb(err) - } -} - -// wrappedError is an implementation of error that has both the -// outer and inner errors. -type wrappedError struct { - Outer error - Inner error -} - -func (w *wrappedError) Error() string { - return w.Outer.Error() -} - -func (w *wrappedError) WrappedErrors() []error { - return []error{w.Outer, w.Inner} -} - -func (w *wrappedError) Unwrap() error { - return w.Inner -} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE deleted file mode 100644 index 82b4de97..00000000 --- a/vendor/github.com/hashicorp/go-multierror/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. 
“Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile deleted file mode 100644 index b97cd6ed..00000000 --- a/vendor/github.com/hashicorp/go-multierror/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code. -test: generate - @echo "==> Running tests..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} - -# testrace runs the race checker -testrace: generate - @echo "==> Running tests (race)..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -race ${TESTARGS} - -# updatedeps installs all the dependencies needed to run and build. -updatedeps: - @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" - -# generate runs `go generate` to build the dynamically generated source files. -generate: - @echo "==> Generating..." - @find . -type f -name '.DS_Store' -delete - @go list ./... \ - | grep -v "/vendor/" \ - | xargs -n1 go generate - -.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md deleted file mode 100644 index 71dd308e..00000000 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# go-multierror - -[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) -[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) -![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) - -[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror -[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror - -`go-multierror` is a package for Go that provides a mechanism for -representing a list of `error` values as a single `error`. - -This allows a function in Go to return an `error` that might actually -be a list of errors. 
If the caller knows this, they can unwrap the -list and access the errors. If the caller doesn't know, the error -formats to a nice human-readable format. - -`go-multierror` is fully compatible with the Go standard library -[errors](https://golang.org/pkg/errors/) package, including the -functions `As`, `Is`, and `Unwrap`. This provides a standardized approach -for introspecting on error values. - -## Installation and Docs - -Install using `go get github.com/hashicorp/go-multierror`. - -Full documentation is available at -https://pkg.go.dev/github.com/hashicorp/go-multierror - -### Requires go version 1.13 or newer - -`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced -[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which -this library takes advantage of. - -If you need to use an earlier version of go, you can use the -[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -tag, which doesn't rely on features in go 1.13. - -If you see compile errors that look like the below, it's likely that -you're on an older version of go: - -``` -/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As -/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is -``` - -## Usage - -go-multierror is easy to use and purposely built to be unobtrusive in -existing Go applications/libraries that may not be aware of it. - -**Building a list of errors** - -The `Append` function is used to create a list of errors. This function -behaves a lot like the Go built-in `append` function: it doesn't matter -if the first argument is nil, a `multierror.Error`, or any other `error`, -the function behaves as you would expect. - -```go -var result error - -if err := step1(); err != nil { - result = multierror.Append(result, err) -} -if err := step2(); err != nil { - result = multierror.Append(result, err) -} - -return result -``` - -**Customizing the formatting of the errors** - -By specifying a custom `ErrorFormat`, you can customize the format -of the `Error() string` function: - -```go -var result *multierror.Error - -// ... accumulate errors here, maybe using Append - -if result != nil { - result.ErrorFormat = func([]error) string { - return "errors!" - } -} -``` - -**Accessing the list of errors** - -`multierror.Error` implements `error` so if the caller doesn't know about -multierror, it will work just fine. But if you're aware a multierror might -be returned, you can use type switches to access the list of errors: - -```go -if err := something(); err != nil { - if merr, ok := err.(*multierror.Error); ok { - // Use merr.Errors - } -} -``` - -You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) -function. This will continue to unwrap into subsequent errors until none exist. - -**Extracting an error** - -The standard library [`errors.As`](https://golang.org/pkg/errors/#As) -function can be used directly with a multierror to extract a specific error: - -```go -// Assume err is a multierror value -err := somefunc() - -// We want to know if "err" has a "RichErrorType" in it and extract it. -var errRich RichErrorType -if errors.As(err, &errRich) { - // It has it, and now errRich is populated. -} -``` - -**Checking for an exact error value** - -Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) -error in the `os` package. 
You can check if this error is present by using -the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. - -```go -// Assume err is a multierror value -err := somefunc() -if errors.Is(err, os.ErrNotExist) { - // err contains os.ErrNotExist -} -``` - -**Returning a multierror only if there are errors** - -If you build a `multierror.Error`, you can use the `ErrorOrNil` function -to return an `error` implementation only if there are errors to return: - -```go -var result *multierror.Error - -// ... accumulate errors here - -// Return the `error` only if errors were added to the multierror, otherwise -// return nil since there are no errors. -return result.ErrorOrNil() -``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go deleted file mode 100644 index 3e2589bf..00000000 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ /dev/null @@ -1,43 +0,0 @@ -package multierror - -// Append is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. If any of the errs are multierr.Error, they will be flattened -// one level into err. -// Any nil errors within errs will be ignored. If err is nil, a new -// *Error will be returned. -func Append(err error, errs ...error) *Error { - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Go through each error and flatten - for _, e := range errs { - switch e := e.(type) { - case *Error: - if e != nil { - err.Errors = append(err.Errors, e.Errors...) - } - default: - if e != nil { - err.Errors = append(err.Errors, e) - } - } - } - - return err - default: - newErrs := make([]error, 0, len(errs)+1) - if err != nil { - newErrs = append(newErrs, err) - } - newErrs = append(newErrs, errs...) - - return Append(&Error{}, newErrs...) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go deleted file mode 100644 index aab8e9ab..00000000 --- a/vendor/github.com/hashicorp/go-multierror/flatten.go +++ /dev/null @@ -1,26 +0,0 @@ -package multierror - -// Flatten flattens the given error, merging any *Errors together into -// a single *Error. -func Flatten(err error) error { - // If it isn't an *Error, just return the error as-is - if _, ok := err.(*Error); !ok { - return err - } - - // Otherwise, make the result and flatten away! - flatErr := new(Error) - flatten(err, flatErr) - return flatErr -} - -func flatten(err error, flatErr *Error) { - switch err := err.(type) { - case *Error: - for _, e := range err.Errors { - flatten(e, flatErr) - } - default: - flatErr.Errors = append(flatErr.Errors, err) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go deleted file mode 100644 index 47f13c49..00000000 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ /dev/null @@ -1,27 +0,0 @@ -package multierror - -import ( - "fmt" - "strings" -) - -// ErrorFormatFunc is a function callback that is called by Error to -// turn the list of errors into a string. -type ErrorFormatFunc func([]error) string - -// ListFormatFunc is a basic formatter that outputs the number of errors -// that occurred along with a bullet point list of the errors. 
-func ListFormatFunc(es []error) string { - if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) - } - - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d errors occurred:\n\t%s\n\n", - len(es), strings.Join(points, "\n\t")) -} diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go deleted file mode 100644 index 9c29efb7..00000000 --- a/vendor/github.com/hashicorp/go-multierror/group.go +++ /dev/null @@ -1,38 +0,0 @@ -package multierror - -import "sync" - -// Group is a collection of goroutines which return errors that need to be -// coalesced. -type Group struct { - mutex sync.Mutex - err *Error - wg sync.WaitGroup -} - -// Go calls the given function in a new goroutine. -// -// If the function returns an error it is added to the group multierror which -// is returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.mutex.Lock() - g.err = Append(g.err, err) - g.mutex.Unlock() - } - }() -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the multierror. -func (g *Group) Wait() *Error { - g.wg.Wait() - g.mutex.Lock() - defer g.mutex.Unlock() - return g.err -} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go deleted file mode 100644 index f5457432..00000000 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ /dev/null @@ -1,121 +0,0 @@ -package multierror - -import ( - "errors" - "fmt" -) - -// Error is an error type to track multiple errors. This is used to -// accumulate errors in cases and return them as a single "error". -type Error struct { - Errors []error - ErrorFormat ErrorFormatFunc -} - -func (e *Error) Error() string { - fn := e.ErrorFormat - if fn == nil { - fn = ListFormatFunc - } - - return fn(e.Errors) -} - -// ErrorOrNil returns an error interface if this Error represents -// a list of errors, or returns nil if the list of errors is empty. This -// function is useful at the end of accumulation to make sure that the value -// returned represents the existence of errors. -func (e *Error) ErrorOrNil() error { - if e == nil { - return nil - } - if len(e.Errors) == 0 { - return nil - } - - return e -} - -func (e *Error) GoString() string { - return fmt.Sprintf("*%#v", *e) -} - -// WrappedErrors returns the list of errors that this Error is wrapping. It is -// an implementation of the errwrap.Wrapper interface so that multierror.Error -// can be used with that library. -// -// This method is not safe to be called concurrently. Unlike accessing the -// Errors field directly, this function also checks if the multierror is nil to -// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - return e.Errors -} - -// Unwrap returns an error from Error (or nil if there are no errors). -// This error returned will further support Unwrap to get the next error, -// etc. The order will match the order of Errors in the multierror.Error -// at the time of calling. -// -// The resulting error supports errors.As/Is/Unwrap so you can continue -// to use the stdlib errors package to introspect further. -// -// This will perform a shallow copy of the errors slice. 
Any errors appended -// to this error after calling Unwrap will not be available until a new -// Unwrap is called on the multierror.Error. -func (e *Error) Unwrap() error { - // If we have no errors then we do nothing - if e == nil || len(e.Errors) == 0 { - return nil - } - - // If we have exactly one error, we can just return that directly. - if len(e.Errors) == 1 { - return e.Errors[0] - } - - // Shallow copy the slice - errs := make([]error, len(e.Errors)) - copy(errs, e.Errors) - return chain(errs) -} - -// chain implements the interfaces necessary for errors.Is/As/Unwrap to -// work in a deterministic way with multierror. A chain tracks a list of -// errors while accounting for the current represented error. This lets -// Is/As be meaningful. -// -// Unwrap returns the next error. In the cleanest form, Unwrap would return -// the wrapped error here but we can't do that if we want to properly -// get access to all the errors. Instead, users are recommended to use -// Is/As to get the correct error type out. -// -// Precondition: []error is non-empty (len > 0) -type chain []error - -// Error implements the error interface -func (e chain) Error() string { - return e[0].Error() -} - -// Unwrap implements errors.Unwrap by returning the next error in the -// chain or nil if there are no more errors. -func (e chain) Unwrap() error { - if len(e) == 1 { - return nil - } - - return e[1:] -} - -// As implements errors.As by attempting to map to the current value. -func (e chain) As(target interface{}) bool { - return errors.As(e[0], target) -} - -// Is implements errors.Is by comparing the current value directly. -func (e chain) Is(target error) bool { - return errors.Is(e[0], target) -} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go deleted file mode 100644 index 5c477abe..00000000 --- a/vendor/github.com/hashicorp/go-multierror/prefix.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -// Prefix is a helper function that will prefix some text -// to the given error. If the error is a multierror.Error, then -// it will be prefixed to each wrapped error. -// -// This is useful to use when appending multiple multierrors -// together in order to give better scoping. 
-func Prefix(err error, prefix string) error { - if err == nil { - return nil - } - - format := fmt.Sprintf("%s {{err}}", prefix) - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Wrap each of the errors - for i, e := range err.Errors { - err.Errors[i] = errwrap.Wrapf(format, e) - } - - return err - default: - return errwrap.Wrapf(format, err) - } -} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go deleted file mode 100644 index fecb14e8..00000000 --- a/vendor/github.com/hashicorp/go-multierror/sort.go +++ /dev/null @@ -1,16 +0,0 @@ -package multierror - -// Len implements sort.Interface function for length -func (err Error) Len() int { - return len(err.Errors) -} - -// Swap implements sort.Interface function for swapping elements -func (err Error) Swap(i, j int) { - err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] -} - -// Less implements sort.Interface function for determining order -func (err Error) Less(i, j int) bool { - return err.Errors[i].Error() < err.Errors[j].Error() -} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..87d55747 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 00000000..3a89c6e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 00000000..1d2d645b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md new file mode 100644 index 00000000..11979345 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -0,0 +1,937 @@ +# S2 Compression + +S2 is an extension of [Snappy](https://github.com/google/snappy). + +S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. + +Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. +This means that S2 can seamlessly replace Snappy without converting compressed content. 
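+
+To illustrate that drop-in decoding compatibility, a minimal sketch (a hypothetical helper; it relies only on the compatibility described above):
+
+```Go
+func ReadSnappyCompressed(r io.Reader) ([]byte, error) {
+	// The S2 reader transparently decodes Snappy framed streams as well.
+	return io.ReadAll(s2.NewReader(r))
+}
+```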
+
+S2 can produce Snappy compatible output, faster and better than Snappy.
+If you want the full benefit of the changes, you should use s2 without Snappy compatibility.
+
+S2 is designed to have high throughput on content that cannot be compressed.
+This is important, so you don't have to worry about spending CPU cycles on already compressed data.
+
+## Benefits over Snappy
+
+* Better compression
+* Adjustable compression (3 levels)
+* Concurrent stream compression
+* Faster decompression, even for Snappy compatible content
+* Ability to quickly skip forward in compressed stream
+* Random seeking with indexes
+* Compatible with reading Snappy compressed content
+* Smaller block size overhead on incompressible blocks
+* Block concatenation
+* Uncompressed stream mode
+* Automatic stream size padding
+* Snappy compatible block compression
+
+## Drawbacks over Snappy
+
+* Not optimized for 32 bit systems
+* Streams use slightly more memory due to larger blocks and concurrency (configurable)
+
+# Usage
+
+Installation: `go get -u github.com/klauspost/compress/s2`
+
+Full package documentation:
+
+[![godoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/compress?status.svg
+[2]: https://godoc.org/github.com/klauspost/compress/s2
+
+## Compression
+
+```Go
+func EncodeStream(src io.Reader, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	_, err := io.Copy(enc, src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+You should always call `enc.Close()`; otherwise you will leak resources, and your encode will be incomplete.
+
+For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
+
+The Writer in S2 is always buffered, so `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
+It is possible to flush any buffered data using the `Flush()` method.
+This will block until all data sent to the encoder has been written to the output.
+
+S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
+
+As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
+a slightly more efficient option is the `EncodeBuffer` method.
+This will take ownership of the buffer until the stream is closed.
+
+```Go
+func EncodeStream(src []byte, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	// The encoder owns the buffer until Flush or Close is called.
+	err := enc.EncodeBuffer(src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
+so it should only be used a single time per stream.
+If you need to write several blocks, you should use the regular io.Writer interface.
+
+
+## Decompression
+
+```Go
+func DecodeStream(src io.Reader, dst io.Writer) error {
+	dec := s2.NewReader(src)
+	_, err := io.Copy(dst, dec)
+	return err
+}
+```
+
+Similar to the Writer, a Reader can be reused using the `Reset` method.
+
+For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
+However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
+
+For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`; see the sketch below.
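+
+A minimal round-trip sketch of the block interface (`roundTrip` is a hypothetical helper; per the Single Blocks section below, a destination buffer with capacity `s2.MaxEncodedLen(len(src))` is used as-is):
+
+```Go
+func roundTrip(data []byte) ([]byte, error) {
+	// Pre-allocate the destination; a buffer with this capacity is reused
+	// instead of triggering an allocation inside Encode.
+	dst := make([]byte, 0, s2.MaxEncodedLen(len(data)))
+	compressed := s2.Encode(dst, data)
+
+	// Decode reports malformed input, but blocks carry no CRC (see note below).
+	return s2.Decode(nil, compressed)
+}
+```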
+Note, however, that these functions (like Snappy) do not provide validation of data,
+so data corruption may go undetected. Stream encoding provides CRC checks of the data.
+
+It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
+For big skips, the decompressor is able to skip blocks without decompressing them.
+
+## Single Blocks
+
+Similar to Snappy, S2 offers single-block compression.
+Blocks do not offer the same flexibility and safety as streams,
+but may be preferable for very small payloads, less than 100K.
+
+Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
+It is possible to provide a destination buffer.
+If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
+If not, a new one will be allocated.
+
+Alternatively, `EncodeBetter`/`EncodeBest` can be used for better, but slightly slower, compression.
+
+Similarly, to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
+Again, an optional destination buffer can be supplied.
+`s2.DecodedLen(src)` can be used to get the minimum capacity needed.
+If that is not satisfied, a new buffer will be allocated.
+
+Block functions always operate on a single goroutine, since they are intended for small payloads.
+
+# Commandline tools
+
+Some very simple commandline tools are provided: `s2c` for compression and `s2d` for decompression.
+
+Binaries can be downloaded from the [Releases Page](https://github.com/klauspost/compress/releases).
+
+Installing them requires Go to be installed. To install them, use:
+
+`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
+
+To build binaries into the current folder, use:
+
+`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
+
+
+## s2c
+
+```
+Usage: s2c [options] file1 file2
+
+Compresses all files supplied as input separately.
+Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
+By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and compressed.
+Only http response code 200 is accepted.
+
+Options:
+  -bench int
+        Run benchmark n times. No output will be written
+  -blocksize string
+        Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
+  -c    Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+        Compress using this amount of threads (default 32)
+  -faster
+        Compress faster, but with a minor compression loss
+  -help
+        Display help
+  -index
+        Add seek index (default true)
+  -o string
+        Write output to another file. Single input file only
+  -pad string
+        Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
+  -q    Don't write any output to terminal, except errors
+  -rm
+        Delete source file(s) after successful compression
+  -safe
+        Do not overwrite output files
+  -slower
+        Compress more, but a lot slower
+  -snappy
+        Generate Snappy compatible output stream
+  -verify
+        Verify written files
+
+```
+
+## s2d
+
+```
+Usage: s2d [options] file1 file2
+
+Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
+Output file names have the extension removed. By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will decompress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+  -bench int
+        Run benchmark n times. No output will be written
+  -c    Write all output to stdout. Multiple input files will be concatenated
+  -help
+        Display help
+  -o string
+        Write output to another file. Single input file only
+  -offset string
+        Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -q    Don't write any output to terminal, except errors
+  -rm
+        Delete source file(s) after successful decompression
+  -safe
+        Do not overwrite output files
+  -tail string
+        Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -verify
+        Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host os,
+but this can be overridden with `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when the untar option is used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+  -arch string
+        Destination architecture (default "amd64")
+  -c    Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+        Compress using this amount of threads (default 32)
+  -help
+        Display help
+  -max string
+        Maximum executable size. Rest will be written to another file. (default "1G")
+  -os string
+        Destination operating system (default "windows")
+  -q    Don't write any output to terminal, except errors
+  -rm
+        Delete source file(s) after successful compression
+  -safe
+        Do not overwrite output files
+  -untar
+        Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded, the remaining file content is written to a file called
+output+`.more`. For a successful extraction, this file must be included and
+placed alongside the executable.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+so must the `.more` file be.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file, you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped; see the example below.
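+
+A hypothetical invocation, based only on the options listed above (`backup.tar` is a placeholder file name):
+
+```
+# Wrap an existing tar file into a self-extracting Linux executable:
+s2sx -os linux -arch amd64 -untar backup.tar
+
+# Running the resulting backup.tar.s2sx on the destination host
+# extracts the tar contents into the current folder.
+```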
+
+For security reasons, files that would be placed outside the root folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression, [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+offers better compression, but typically at speeds slightly below "better" mode in this package.
+
+Compression is increased compared to Snappy, mostly by around 5-20%, and single-threaded throughput is typically 25-40% higher than the Snappy Go implementation.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows trading a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
+
+Snappy vs S2 **compression** speed on a 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% |
+| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% |
+| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% |
+| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% |
+| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% |
+| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% |
+| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% |
+| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - |
+| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% |
+| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% |
+| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% |
+| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% |
+| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - |
+
+### Legend
+
+* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How many percent smaller the S2 output is than the Snappy output.
+* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
+* `"better" throughput`: Throughput of S2 in "better" mode in MB/s.
+* `"better" % smaller`: How many percent smaller the S2 "better" output is than the Snappy output.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine generated data gets by far the biggest compression boost, with size being reduced by up to 45% compared to Snappy.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single-threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode, where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on a single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy for the preceding throughput column.
+
+While the decompression code hasn't changed, there is a significant speedup in decompression speed.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Decompression is also very fast without assembly. Single goroutine decompression speed, no assembly:
+
+| File | vs. Snappy | S2 Throughput |
+|--------------------------------|------------|---------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2 | 1.52x | 811.58 MB/s |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+## Block compression
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable benchmark is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
+| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
+| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
+| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
+| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
+
+S2 delivers both the best single-threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
+
+When producing Snappy compatible output, S2 still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
+
+Though they cannot be compared directly due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
+| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
+| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
+| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
+
+### Standard block compression
+
+Benchmarking single-block performance is subject to a lot more variation, since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark, see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s |
+
+
+| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
+|-----------------------|-------------|------------------|----------|--------------|
+| html | 22.31% | 7.58% | 1.07x | 1.20x |
+| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
+| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
+| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
+| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
+| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
+| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
+| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
+| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
+| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
+| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
+| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
+| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
+| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
+| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
+| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small, the variance in size is rather big, so these numbers should only be seen as a general guideline.
+
+Size is on average around Snappy's, but varies with content type.
+In cases where compression is worse, it is usually compensated for by a speed boost.
+
+
+### Better compression
+
+Benchmarking single-block performance is subject to a lot more variation, since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
+
+| Absolute Perf         | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec  | Better dec  |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html                  | 22843       | 19833       | 16246 MB/s   | 7731 MB/s    | 40972 MB/s  | 40292 MB/s  |
+| urls.10K              | 335492      | 253529      | 7943 MB/s    | 3980 MB/s    | 22523 MB/s  | 20981 MB/s  |
+| fireworks.jpeg        | 123034      | 123100      | 349544 MB/s  | 9760 MB/s    | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146         | 142         | 8869 MB/s    | 594 MB/s     | 33691 MB/s  | 30101 MB/s  |
+| paper-100k.pdf        | 85304       | 82915       | 167546 MB/s  | 7470 MB/s    | 326905 MB/s | 198869 MB/s |
+| html_x_4              | 92234       | 19841       | 15194 MB/s   | 23403 MB/s   | 30843 MB/s  | 30937 MB/s  |
+| alice29.txt           | 88034       | 73218       | 5936 MB/s    | 2945 MB/s    | 12882 MB/s  | 16611 MB/s  |
+| asyoulik.txt          | 77503       | 66844       | 5517 MB/s    | 2739 MB/s    | 12735 MB/s  | 14975 MB/s  |
+| lcet10.txt            | 234661      | 190589      | 6235 MB/s    | 3099 MB/s    | 14519 MB/s  | 16634 MB/s  |
+| plrabn12.txt          | 319267      | 270828      | 5159 MB/s    | 2600 MB/s    | 11923 MB/s  | 13382 MB/s  |
+| geo.protodata         | 23335       | 18278       | 21220 MB/s   | 11208 MB/s   | 56271 MB/s  | 57961 MB/s  |
+| kppkn.gtb             | 69526       | 61851       | 9732 MB/s    | 4556 MB/s    | 18491 MB/s  | 16524 MB/s  |
+| alice29.txt (128B)    | 80          | 81          | 6691 MB/s    | 529 MB/s     | 31883 MB/s  | 34225 MB/s  |
+| alice29.txt (1000B)   | 774         | 748         | 12204 MB/s   | 1943 MB/s    | 48056 MB/s  | 42068 MB/s  |
+| alice29.txt (10000B)  | 6648        | 6234        | 10044 MB/s   | 2949 MB/s    | 32378 MB/s  | 28813 MB/s  |
+| alice29.txt (20000B)  | 12686       | 11584       | 7733 MB/s    | 2822 MB/s    | 30566 MB/s  | 27315 MB/s  |
+
+| Relative Perf         | Snappy size | Better size | Better Speed | Better dec |
+|-----------------------|-------------|-------------|--------------|------------|
+| html                  | 22.31%      | 13.18%      | 0.48x        | 0.98x      |
+| urls.10K              | 47.78%      | 24.43%      | 0.50x        | 0.93x      |
+| fireworks.jpeg        | 99.95%      | -0.05%      | 0.03x        | 1.15x      |
+| fireworks.jpeg (200B) | 73.00%      | 2.74%       | 0.07x        | 0.89x      |
+| paper-100k.pdf        | 83.30%      | 2.80%       | 0.07x        | 0.61x      |
+| html_x_4              | 22.52%      | 78.49%      | 0.04x        | 1.00x      |
+| alice29.txt           | 57.88%      | 16.83%      | 1.54x        | 1.29x      |
+| asyoulik.txt          | 61.91%      | 13.75%      | 0.50x        | 1.18x      |
+| lcet10.txt            | 54.99%      | 18.78%      | 0.50x        | 1.15x      |
+| plrabn12.txt          | 66.26%      | 15.17%      | 0.50x        | 1.12x      |
+| geo.protodata         | 19.68%      | 21.67%      | 0.50x        | 1.03x      |
+| kppkn.gtb             | 37.72%      | 11.04%      | 0.53x        | 0.89x      |
+| alice29.txt (128B)    | 62.50%      | -1.25%      | 0.47x        | 1.07x      |
+| alice29.txt (1000B)   | 77.40%      | 3.36%       | 0.08x        | 0.88x      |
+| alice29.txt (10000B)  | 66.48%      | 6.23%       | 0.16x        | 0.89x      |
+| alice29.txt (20000B)  | 63.43%      | 8.69%       | 0.29x        | 0.89x      |
+
+Except for the mostly incompressible JPEG image, compression is better and usually in the
+double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of performance and achieves that
+without a huge performance penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of the increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+It is mainly intended for offline compression, where decompression speed should still
+be high and the output compatible with other S2 compressed data.
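+
+A minimal sketch of selecting this mode (the `EncodeBest` block function and the
+`WriterBestCompression()` writer option from the s2 package; `data`, `w` and `r` are assumed defined):
+
+```
+	// Block: compress as small as possible; decompression stays fast.
+	compressed := s2.EncodeBest(nil, data)
+
+	// Stream: enable best compression on the writer.
+	enc := s2.NewWriter(w, s2.WriterBestCompression())
+	io.Copy(enc, r)
+	enc.Close()
+```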
+
+Some examples compared on a 16-core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
+Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
+Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
+Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/s
+
+* consensus.db.10gb
+Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
+Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
+Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy-compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy-compatible blocks can be generated with the S2 encoder.
+Compression and speed is typically a bit better, and `MaxEncodedLen` is also smaller, reducing memory usage. Replace:
+
+| Snappy                    | S2 replacement       |
+|---------------------------|----------------------|
+| snappy.Encode(...)        | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed, Snappy-compatible output.
+
+`s2.ConcatBlocks` is compatible with Snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder               | Size       | MB/s       | Reduction  |
+|-----------------------|------------|------------|------------|
+| snappy.Encode         | 1128706759 | 725.59     | 71.89%     |
+| s2.EncodeSnappy       | 1093823291 | **899.16** | 72.75%     |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49     | 75.06%     |
+| s2.EncodeSnappyBest   | 944507998  | 66.00      | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
+All other options are available, but note that the block size limit is different for Snappy.
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores.
Size and throughput:
+
+| File                        | snappy.NewWriter         | S2 Snappy                 | S2 Snappy, Better        | S2 Snappy, Best         |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv       | 1316042016 - 539.47MB/s  | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml)               | 5088294643 - 451.13MB/s  | 5175840939 - 9440.69MB/s  | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed)            | 6056946612 - 729.73MB/s  | 6208571995 - 9978.05MB/s  | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s  | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db)      | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+# Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy                 | S2 replacement     |
+|------------------------|--------------------|
+| snappy.Decode(...)     | s2.Decode(...)     |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...)  | s2.NewReader(...)  |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing Snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10-byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream, it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
+The index will then be added to the stream when `Close()` is called.
+
+```
+	// Add Index to stream...
+	enc := s2.NewWriter(w, s2.WriterAddIndex())
+	io.Copy(enc, r)
+	enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+	// Get index for separate storage...
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, err := enc.CloseIndex()
+```
+
+The `index` can then be used when reading from the stream.
+This means the index can be used without needing to seek to the end of the stream,
+or for manually forwarding streams. See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
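+
+As a rough sketch of indexing an already-written stream (the file name is hypothetical;
+`IndexStream` reads through the stream and returns the serialized index):
+
+```
+	f, err := os.Open("data.s2") // hypothetical file name
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	idxBytes, err := s2.IndexStream(f)
+```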
+
+## Using Indexes
+
+To use indexes, there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` method available.
+
+Calling `ReadSeeker` will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified, the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, nil)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This gets a seeker that can seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified, which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(true, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` method can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code, it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+	var index s2.Index
+	_, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder, it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`,
+we create the decoder like this:
+
+```
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+	err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
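+
+Putting these steps together, a rough end-to-end sketch. Here `openAt` is a hypothetical helper
+(for instance one issuing an HTTP Range request) that returns the compressed stream starting at
+the given byte offset; it is not part of the s2 package:
+
+```
+	var index s2.Index
+	if _, err := index.Load(idxBytes); err != nil {
+		return err
+	}
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+	if err != nil {
+		return err
+	}
+	r, err := openAt(compressedOffset) // hypothetical transport helper
+	if err != nil {
+		return err
+	}
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+	if err := dec.Skip(wantOffset - uncompressedOffset); err != nil {
+		return err
+	}
+	// Reading from dec now starts at exactly wantOffset.
+```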
+
+## Index Format
+
+Each block is structured as a Snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with an un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content                              | Format                                                                                                                        |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte`                        | Always 0x99.                                                                                                                  |
+| Data Length, `[3]byte`               | 3 byte little-endian length of the chunk in bytes, following this.                                                            |
+| Header `[6]byte`                     | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00".                                                        |
+| UncompressedSize, Varint             | Total Uncompressed size.                                                                                                      |
+| CompressedSize, Varint               | Total Compressed size if known. Should be -1 if unknown.                                                                      |
+| EstBlockSize, Varint                 | Block Size, used for guessing uncompressed offsets. Must be >= 0.                                                             |
+| Entries, Varint                      | Number of Entries in index, must be < 65536 and >= 0.                                                                         |
+| HasUncompressedOffsets `byte`        | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid.                                             |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode.                                                                                |
+| CompressedOffsets, [Entries]VarInt   | Compressed offsets. See below how to decode.                                                                                  |
+| Block Size, `[4]byte`                | Little-endian total encoded size (including header and trailer). Can be used for searching backwards to start of block.      |
+| Trailer `[6]byte`                    | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams, the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block, since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact, there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+	uOff = 0
+	if HasUncompressedOffsets == 1 {
+		uOff = ReadVarInt // Read value from stream
+	}
+
+	// Except for the first entry, use previous values.
+	if entryNum == 0 {
+		entry[entryNum].UncompressedOffset = uOff
+		continue
+	}
+
+	// Uncompressed uses previous offset and adds EstBlockSize + the read adjustment.
+	entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+	cOff = ReadVarInt // Read value from stream
+
+	// Except for the first entry, use previous values.
+	if entryNum == 0 {
+		entry[entryNum].CompressedOffset = cOff
+		continue
+	}
+
+	// Compressed uses previous and our estimate.
+	entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+	// Adjust compressed offset for next loop, integer truncating division must be used.
+	CompressGuess += cOff/2
+}
+```
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length field in the tag and decoding it using this table:
+
+| Length | Actual Length        |
+|--------|----------------------|
+| 0      | 4                    |
+| 1      | 5                    |
+| 2      | 6                    |
+| 3      | 7                    |
+| 4      | 8                    |
+| 5      | 8 + read 1 byte      |
+| 6      | 260 + read 2 bytes   |
+| 7      | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+
+Lengths are stored as little-endian values.
+
+The first copy of a block cannot be a repeat offset, and the offset is not carried across blocks in streams.
+
+Default streaming block size is 1MB.
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 00000000..9e7fce88
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,762 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("s2: corrupt input")
+	// ErrCRC reports that the input failed CRC validation (streams only)
+	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("s2: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// ErrCantSeek is returned if the stream cannot be seeked.
+type ErrCantSeek struct {
+	Reason string
+}
+
+// Error returns the error as string.
+func (e ErrCantSeek) Error() string {
+	return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
+}
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2Decode(dst, src[s:]) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes. +func NewReader(r io.Reader, opts ...ReaderOption) *Reader { + nr := Reader{ + r: r, + maxBlock: maxBlockSize, + } + for _, opt := range opts { + if err := opt(&nr); err != nil { + nr.err = err + return &nr + } + } + nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize + if nr.lazyBuf > 0 { + nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize) + } else { + nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize) + } + nr.readHeader = nr.ignoreStreamID + nr.paramsOK = true + return &nr +} + +// ReaderOption is an option for creating a decoder. +type ReaderOption func(*Reader) error + +// ReaderMaxBlockSize allows to control allocations if the stream +// has been compressed with a smaller WriterBlockSize, or with the default 1MB. +// Blocks must be this size or smaller to decompress, +// otherwise the decoder will return ErrUnsupported. +// +// For streams compressed with Snappy this can safely be set to 64KB (64 << 10). +// +// Default is the maximum limit of 4MB. +func ReaderMaxBlockSize(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize <= 0 { + return errors.New("s2: block size too large. Must be <= 4MB and > 0") + } + if r.lazyBuf == 0 && blockSize < defaultBlockSize { + r.lazyBuf = blockSize + } + r.maxBlock = blockSize + return nil + } +} + +// ReaderAllocBlock allows to control upfront stream allocations +// and not allocate for frames bigger than this initially. +// If frames bigger than this is seen a bigger buffer will be allocated. +// +// Default is 1MB, which is default output size. +func ReaderAllocBlock(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize < 1024 { + return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024") + } + r.lazyBuf = blockSize + return nil + } +} + +// ReaderIgnoreStreamIdentifier will make the reader skip the expected +// stream identifier at the beginning of the stream. +// This can be used when serving a stream that has been forwarded to a specific point. +func ReaderIgnoreStreamIdentifier() ReaderOption { + return func(r *Reader) error { + r.ignoreStreamID = true + return nil + } +} + +// ReaderSkippableCB will register a callback for chuncks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. 
+// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { + return func(r *Reader) error { + if id < 0x80 || id > 0xfd { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") + } + r.skippableCB[id] = fn + return nil + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + skippableCB [0x80]func(r io.Reader) error + blockStart int64 // Uncompressed offset at start of current. + index *Index + + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + // maximum block size allowed. + maxBlock int + // maximum expected buffer size. + maxBufSize int + // alloc a buffer this size if > 0. + lazyBuf int + readHeader bool + paramsOK bool + snappyFrame bool + ignoreStreamID bool +} + +// ensureBufferSize will ensure that the buffer can take at least n bytes. +// If false is returned the buffer exceeds maximum allowed size. +func (r *Reader) ensureBufferSize(n int) bool { + if len(r.buf) >= n { + return true + } + if n > r.maxBufSize { + r.err = ErrCorrupt + return false + } + // Realloc buffer. + r.buf = make([]byte, n) + return true +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + if !r.paramsOK { + return + } + r.index = nil + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = r.ignoreStreamID +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// skippable will skip n bytes. +// If the supplied reader supports seeking that is used. +// tmp is used as a temporary buffer for reading. +// The supplied slice does not need to be the size of the read. +func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { + if id < 0x80 { + r.err = fmt.Errorf("interbal error: skippable id < 0x80") + return false + } + if fn := r.skippableCB[id-0x80]; fn != nil { + rd := io.LimitReader(r.r, int64(n)) + r.err = fn(rd) + if r.err != nil { + return false + } + _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp) + return r.err == nil + } + if rs, ok := r.r.(io.ReadSeeker); ok { + _, err := rs.Seek(int64(n), io.SeekCurrent) + if err == nil { + return true + } + if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + return false + } + } + for n > 0 { + if n < len(tmp) { + tmp = tmp[:n] + } + if _, r.err = io.ReadFull(r.r, tmp); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + n -= len(tmp) + } + return true +} + +// Read satisfies the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
+ if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + r.i += int(n) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + dLen, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if dLen > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + // Check if destination is within this block + if int64(dLen) > n { + if len(r.decoded) < dLen { + r.decoded = make([]byte, dLen) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:dLen]) != checksum { + r.err = ErrCorrupt + return r.err + } + } else { + // Skip block completely + n -= int64(dLen) + dLen = 0 + } + r.i, r.j = 0, dLen + continue + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err != nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n2 := chunkLen - checksumSize + if n2 > len(r.decoded) { + if n2 > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + r.decoded = make([]byte, n2) + } + if !r.readFull(r.decoded[:n2], false) { + return r.err + } + if int64(n2) < n { + if crc(r.decoded[:n2]) != checksum { + r.err = ErrCorrupt + return r.err + } + } + r.i, r.j = 0, n2 + continue + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return r.err + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + if chunkLen > maxChunkSize { + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return r.err + } + } + return nil +} + +// ReadSeeker provides random or forward seeking in compressed content. +// See Reader.ReadSeeker +type ReadSeeker struct { + *Reader +} + +// ReadSeeker will return an io.ReadSeeker compatible version of the reader. +// If 'random' is specified the returned io.Seeker can be used for +// random seeking, otherwise only forward seeking is supported. +// Enabling random seeking requires the original input to support +// the io.Seeker interface. +// A custom index can be specified which will be used if supplied. +// When using a custom index, it will not be read from the input stream. +// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + return 0, r.err + } + if offset == 0 && whence == io.SeekCurrent { + return r.blockStart + int64(r.i), nil + } + if !r.readHeader { + // Make sure we read the header. 
+ _, r.err = r.Read([]byte{}) + } + rs, ok := r.r.(io.ReadSeeker) + if r.index == nil || !ok { + if whence == io.SeekCurrent && offset >= 0 { + err := r.Skip(offset) + return r.blockStart + int64(r.i), err + } + if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) { + err := r.Skip(offset - r.blockStart - int64(r.i)) + return r.blockStart + int64(r.i), err + } + return 0, ErrUnsupported + + } + + switch whence { + case io.SeekCurrent: + offset += r.blockStart + int64(r.i) + case io.SeekEnd: + offset = -offset + } + c, u, err := r.index.Find(offset) + if err != nil { + return r.blockStart + int64(r.i), err + } + + // Seek to next block + _, err = rs.Seek(c, io.SeekStart) + if err != nil { + return 0, err + } + + if offset < 0 { + offset = r.index.TotalUncompressed + offset + } + + r.i = r.j // Remove rest of current block. + if u < offset { + // Forward inside block + return offset, r.Skip(offset - u) + } + return offset, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + if r.i < r.j { + c := r.decoded[r.i] + r.i++ + return c, nil + } + var tmp [1]byte + for i := 0; i < 10; i++ { + n, err := r.Read(tmp[:]) + if err != nil { + return 0, err + } + if n == 1 { + return tmp[0], nil + } + } + return 0, io.ErrNoProgress +} + +// SkippableCB will register a callback for chunks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +// Sending a nil function will disable previous callbacks. +func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { + if id < 0x80 || id > chunkTypePadding { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") + } + r.skippableCB[id] = fn + return nil +} diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s new file mode 100644 index 00000000..9b105e03 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s @@ -0,0 +1,568 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 AX +#define R_TMP1 BX +#define R_LEN CX +#define R_OFF DX +#define R_SRC SI +#define R_DST DI +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF + +loop: + // for s < len(src) + CMPQ R_SRC, R_SEND + JEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, R_LEN + CMPL R_LEN, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ R_LEN, $16 + JGT callMemmove + CMPQ R_TMP0, $16 + JLT callMemmove + CMPQ R_TMP1, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ R_LEN, R_TMP0 + JGT errCorrupt + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND + JA errCorrupt + + // case x == 60: + CMPL R_LEN, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(R_SRC), R_LEN + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(R_SRC), R_LEN + JMP doLit + +tagLit62Plus: + CMPL R_LEN, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(R_SRC), R_LEN + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(R_SRC), R_OFF + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(R_SRC), R_OFF + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMPQ R_TMP1, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // length = 4 + int(src[s-2])>>2&0x7 + MOVBQZX -1(R_SRC), R_TMP1 + MOVQ R_LEN, R_TMP0 + SHRQ $2, R_LEN + ANDQ $0xe0, R_TMP0 + ANDQ $7, R_LEN + SHLQ $3, R_TMP0 + ADDQ $4, R_LEN + ORQ R_TMP1, R_TMP0 + + // check if repeat code, ZF set by ORQ. + JZ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (length) + MOVQ R_TMP0, R_OFF + JMP doCopy + +// This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMPQ R_LEN, $9 + JL doCopyRepeat + + // Read additional bytes for length. + JE repeatLen1 + + // Rare, so the extra branch shouldn't hurt too much. + CMPQ R_LEN, $10 + JE repeatLen2 + JMP repeatLen3 + +// Read repeat lengths. +repeatLen1: + // s ++ + ADDQ $1, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = src[s-1] + 8 + MOVBQZX -1(R_SRC), R_LEN + ADDL $8, R_LEN + JMP doCopyRepeat + +repeatLen2: + // s +=2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) + MOVWQZX -2(R_SRC), R_LEN + ADDL $260, R_LEN + JMP doCopyRepeat + +repeatLen3: + // s +=3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) + // Read one byte further back (just part of the tag, shifted out) + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + ADDL $65540, R_LEN + JMP doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVQ R_DST, R_TMP1 + SUBQ R_DBASE, R_TMP1 + CMPQ R_TMP1, R_OFF + JLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + // if offset <= 0 { etc } + CMPQ R_OFF, $0 + JLE errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R_DEND, R_TMP1 + SUBQ R_DST, R_TMP1 + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVQ R_DEND, R_TMP2 + SUBQ R_DST, R_TMP2 + MOVQ R_DST, R_TMP3 + SUBQ R_OFF, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ R_LEN, $16 + JGT slowForwardCopy + CMPQ R_OFF, $8 + JLT slowForwardCopy + CMPQ R_TMP2, $16 + JLT slowForwardCopy + MOVQ 0(R_TMP3), R_TMP0 + MOVQ R_TMP0, 0(R_DST) + MOVQ 8(R_TMP3), R_TMP1 + MOVQ R_TMP1, 8(R_DST) + ADDQ R_LEN, R_DST + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R_TMP2 + CMPQ R_LEN, R_TMP2 + JGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVQ R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMPQ R_TMP2, $8 + JGE fixUpSlowForwardCopy + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_DST) + SUBQ R_TMP2, R_LEN + ADDQ R_TMP2, R_DST + ADDQ R_TMP2, R_TMP2 + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ R_DST, R_TMP0 + ADDQ R_LEN, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ R_LEN, $0 + JLE loop + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_TMP0) + ADDQ $8, R_TMP3 + ADDQ $8, R_TMP0 + SUBQ $8, R_LEN + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. 
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ R_DST, R_DEND + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s new file mode 100644 index 00000000..4b63d508 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -0,0 +1,574 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-64 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF + +loop: + // for s < len(src) + CMP R_SEND, R_SRC + BEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 + MOVW $1, R1 + CMPW R1, R_TMP1 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R_SRC, R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. 
+ // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R_LEN + BGT callMemmove + CMP $16, R_TMP0 + BLT callMemmove + CMP $16, R_TMP1 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R_TMP0, R_LEN + BGT errCorrupt + CMP R_TMP1, R_LEN + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADD R_LEN, R_SRC, R_SRC + SUB $58, R_SRC, R_SRC + TEST_SRC() + + // case x == 60: + MOVW $61, R1 + CMPW R1, R_LEN + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R_SRC), R_LEN + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R_SRC), R_LEN + B doLit + +tagLit62Plus: + CMPW $62, R_LEN + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R_SRC), R_LEN + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP1<<16, R_LEN + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R_SRC), R_LEN + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + MOVD R_SRC, R_TMP1 + SUB R_SBASE, R_TMP1, R_TMP1 + CMP R_SLEN, R_TMP1 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R_SRC), R_OFF + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R_SRC), R_OFF + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMP $2, R_TMP1 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // Calculate offset in R_TMP0 in case it is a repeat. + MOVD R_LEN, R_TMP0 + AND $0xe0, R_TMP0 + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP0<<3, R_TMP1, R_TMP0 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R_LEN>>2, R1, R_LEN + ADD $4, R_LEN, R_LEN + + // check if repeat code with offset 0. + CMP $0, R_TMP0 + BEQ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (offset) + MOVD R_TMP0, R_OFF + B doCopy + + // This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMP $9, R_LEN + BLT doCopyRepeat + BEQ repeatLen1 + CMP $10, R_LEN + BEQ repeatLen2 + +repeatLen3: + // s +=3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 + MOVBU -1(R_SRC), R_TMP0 + MOVHU -3(R_SRC), R_LEN + ORR R_TMP0<<16, R_LEN, R_LEN + ADD $65540, R_LEN, R_LEN + B doCopyRepeat + +repeatLen2: + // s +=2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 + MOVHU -2(R_SRC), R_LEN + ADD $260, R_LEN, R_LEN + B doCopyRepeat + +repeatLen1: + // s +=1 + ADD $1, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = src[s-1] + 8 + MOVBU -1(R_SRC), R_LEN + ADD $8, R_LEN, R_LEN + B doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+ //
+ // We assume that:
+ // - R_LEN == length && R_LEN > 0
+ // - R_OFF == offset
+
+ // if d < offset { etc }
+ MOVD R_DST, R_TMP1
+ SUB R_DBASE, R_TMP1, R_TMP1
+ CMP R_OFF, R_TMP1
+ BLT errCorrupt
+
+ // Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+
+ // if offset <= 0 { etc }
+ CMP $0, R_OFF
+ BLE errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVD R_DEND, R_TMP1
+ SUB R_DST, R_TMP1, R_TMP1
+ CMP R_TMP1, R_LEN
+ BGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R_TMP2 = len(dst)-d
+ // - R_TMP3 = &dst[d-offset]
+ MOVD R_DEND, R_TMP2
+ SUB R_DST, R_TMP2, R_TMP2
+ MOVD R_DST, R_TMP3
+ SUB R_OFF, R_TMP3, R_TMP3
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMP $16, R_LEN
+ BGT slowForwardCopy
+ CMP $8, R_OFF
+ BLT slowForwardCopy
+ CMP $16, R_TMP2
+ BLT slowForwardCopy
+ MOVD 0(R_TMP3), R_TMP0
+ MOVD R_TMP0, 0(R_DST)
+ MOVD 8(R_TMP3), R_TMP1
+ MOVD R_TMP1, 8(R_DST)
+ ADD R_LEN, R_DST, R_DST
+ B loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ //    abxxxxxxxxxxxx
+ //    [------]           d-offset
+ //      [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //    ababxxxxxxxxxx
+ //    [------]           d-offset
+ //        [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN + BGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMP $8, R_TMP2 + BGE fixUpSlowForwardCopy + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R_LEN + BLE loop + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R_DEND, R_DST + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go new file mode 100644 index 00000000..cb3576ed --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !noasm + +package s2 + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go new file mode 100644 index 00000000..1074ebd2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -0,0 +1,267 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm64 appengine !gc noasm + +package s2 + +import ( + "fmt" + "strconv" +) + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2Decode(dst, src []byte) int { + const debug = false + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := 0 + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + s += 3 + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. 
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
new file mode 100644
index 00000000..59f992ca
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -0,0 +1,1347 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+ "runtime"
+ "sync"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlock(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBetter(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeBest returns the encoded form of src.
The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as well as reasonably possible but with a
+// big speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+ n := encodeBlockBest(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappy returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func EncodeSnappy(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
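The block encoders above and below all share the same calling contract: size dst with MaxEncodedLen to avoid an allocation, and treat the result as one self-contained block. A minimal editorial sketch of that contract (not part of the vendored file; it assumes only the exported helpers in this hunk plus the package's block Decode, which this diff does not include):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("some fairly repetitive payload "), 512)

	// Sizing dst with MaxEncodedLen up front lets the encoder reuse it
	// instead of allocating a fresh slice.
	dst := make([]byte, s2.MaxEncodedLen(len(src)))
	block := s2.EncodeBetter(dst, src)
	fmt.Printf("block: %d -> %d bytes\n", len(src), len(block))

	// Round-trip through the package's block decoder.
	got, err := s2.Decode(nil, block)
	if err != nil || !bytes.Equal(got, src) {
		panic("s2 block round trip failed")
	}
}
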
+// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func EncodeSnappyBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBetterSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The output is Snappy compatible and will likely decompress faster.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may go undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface, which provides all of these features.
+func EncodeSnappyBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:d]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+ }
+
+ n := encodeBlockBestSnappy(dst[d:], src)
+ if n > 0 {
+ d += n
+ return dst[:d]
+ }
+ // Not compressible
+ d += emitLiteral(dst[d:], src)
+ return dst[:d]
+}
+
+// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination.
+// If the destination is nil or too small, a new one will be allocated.
+// The blocks are not validated, so garbage in = garbage out.
+// dst may not overlap block data.
+// Any data in dst is preserved as is, so it will not be considered a block.
+func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) {
+ totalSize := uint64(0)
+ compSize := 0
+ for _, b := range blocks {
+ l, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ totalSize += uint64(l)
+ compSize += len(b) - hdr
+ }
+ if totalSize == 0 {
+ dst = append(dst, 0)
+ return dst, nil
+ }
+ if totalSize > math.MaxUint32 {
+ return nil, ErrTooLarge
+ }
+ var tmp [binary.MaxVarintLen32]byte
+ hdrSize := binary.PutUvarint(tmp[:], totalSize)
+ wantSize := hdrSize + compSize
+
+ if cap(dst)-len(dst) < wantSize {
+ dst = append(make([]byte, 0, wantSize+len(dst)), dst...)
+ }
+ dst = append(dst, tmp[:hdrSize]...)
+ for _, b := range blocks {
+ _, hdr, err := decodedLen(b)
+ if err != nil {
+ return nil, err
+ }
+ dst = append(dst, b[hdr:]...)
+ }
+ return dst, nil
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register.
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +const MaxBlockSize = math.MaxUint32 - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("s2: Writer is closed") + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() interface{} { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool + paramsOK bool + snappy bool + flushOnWrite bool + appendIndex bool + level uint8 +} + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +type result struct { + b []byte + // Uncompressed start offset + startOffset int64 +} + +// err returns the previously set error. +// If no error has been set it is set to err if not nil. +func (w *Writer) err(err error) error { + w.errMu.Lock() + errSet := w.errState + if errSet == nil && err != nil { + w.errState = err + errSet = err + } + w.errMu.Unlock() + return errSet +} + +// Reset discards the writer's state and switches the Snappy writer to write to w. +// This permits reusing a Writer rather than allocating a new one. 
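As an editorial illustration of that reuse (a sketch assuming only NewWriter, Reset, Close and the options defined in this file):

package main

import (
	"bytes"
	"io"
	"strings"

	"github.com/klauspost/compress/s2"
)

func main() {
	// One Writer, reused across destinations via Reset.
	enc := s2.NewWriter(nil, s2.WriterBetterCompression(), s2.WriterConcurrency(2))

	for _, payload := range []string{"first stream ", "second stream "} {
		var buf bytes.Buffer
		enc.Reset(&buf)
		if _, err := io.Copy(enc, strings.NewReader(strings.Repeat(payload, 1024))); err != nil {
			panic(err)
		}
		// Close flushes pending blocks; a later Reset makes the
		// Writer usable again.
		if err := enc.Close(); err != nil {
			panic(err)
		}
	}
}
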
+func (w *Writer) Reset(writer io.Writer) { + if !w.paramsOK { + return + } + // Close previous writer, if any. + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + w.errState = nil + w.ibuf = w.ibuf[:0] + w.wroteStreamHeader = false + w.written = 0 + w.writer = writer + w.uncompWritten = 0 + w.index.reset(w.blockSize) + + // If we didn't get a writer, stop here. + if writer == nil { + return + } + // If no concurrency requested, don't spin up writer goroutine. + if w.concurrency == 1 { + return + } + + toWrite := make(chan chan result, w.concurrency) + w.output = toWrite + w.writerWg.Add(1) + + // Start a writer goroutine that will write all output in order. + go func() { + defer w.writerWg.Done() + + // Get a queued write. + for write := range toWrite { + // Wait for the data to be available. + input := <-write + in := input.b + if len(in) > 0 { + if w.err(nil) == nil { + // Don't expose data from previous buffers. + toWrite := in[:len(in):len(in)] + // Write to output. + n, err := writer.Write(toWrite) + if err == nil && n != len(toWrite) { + err = io.ErrShortBuffer + } + _ = w.err(err) + w.err(w.index.add(w.written, input.startOffset)) + w.written += int64(n) + } + } + if cap(in) >= w.obufLen { + w.buffers.Put(in) + } + // close the incoming write request. + // This can be used for synchronizing flushes. + close(write) + } + }() +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.flushOnWrite { + return w.write(p) + } + // If we exceed the input buffer size, start writing + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + } + nRet += n + p = p[n:] + } + if err := w.err(nil); err != nil { + return nRet, err + } + // p should always be able to fit into w.ibuf now. + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +// ReadFrom implements the io.ReaderFrom interface. +// Using this is typically more efficient since it avoids a memory copy. +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if err := w.err(nil); err != nil { + return 0, err + } + if len(w.ibuf) > 0 { + err := w.Flush() + if err != nil { + return 0, err + } + } + if br, ok := r.(byter); ok { + buf := br.Bytes() + if err := w.EncodeBuffer(buf); err != nil { + return 0, err + } + return int64(len(buf)), w.Flush() + } + for { + inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen] + n2, err := io.ReadFull(r, inbuf[obufHeaderLen:]) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + if err != io.EOF { + return n, w.err(err) + } + } + if n2 == 0 { + break + } + n += int64(n2) + err2 := w.writeFull(inbuf[:n2+obufHeaderLen]) + if w.err(err2) != nil { + break + } + + if err != nil { + // We got EOF and wrote everything + break + } + } + + return n, w.err(nil) +} + +// AddSkippableBlock will add a skippable block to the stream. +// The ID must be 0x80-0xfe (inclusive). +// Length of the skippable block must be <= 16777215 bytes. 
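An editorial usage sketch for the function that follows; the 0x80 ID and the JSON payload are illustrative only, not part of the vendored API:

package main

import (
	"bytes"

	"github.com/klauspost/compress/s2"
)

func main() {
	var buf bytes.Buffer
	enc := s2.NewWriter(&buf)
	// 0x80 is the first ID in the user range 0x80-0xfe.
	if err := enc.AddSkippableBlock(0x80, []byte(`{"origin":"matterwick"}`)); err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("payload the decoder will return")); err != nil {
		panic(err)
	}
	// Decoders that do not recognize chunk 0x80 skip it transparently.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
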
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return nil
+ }
+ if id < 0x80 || id > chunkTypePadding {
+ return fmt.Errorf("invalid skippable block id %x", id)
+ }
+ if len(data) > maxChunkSize {
+ return fmt.Errorf("skippable block exceeds maximum size")
+ }
+ var header [4]byte
+ chunkLen := len(data)
+ header[0] = id
+ header[1] = uint8(chunkLen >> 0)
+ header[2] = uint8(chunkLen >> 8)
+ header[3] = uint8(chunkLen >> 16)
+ if w.concurrency == 1 {
+ write := func(b []byte) error {
+ n, err := w.writer.Write(b)
+ if err = w.err(err); err != nil {
+ return err
+ }
+ if n != len(b) {
+ return w.err(io.ErrShortWrite)
+ }
+ w.written += int64(n)
+ return w.err(nil)
+ }
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ if w.snappy {
+ if err := write([]byte(magicChunkSnappy)); err != nil {
+ return err
+ }
+ } else {
+ if err := write([]byte(magicChunk)); err != nil {
+ return err
+ }
+ }
+ }
+ if err := write(header[:]); err != nil {
+ return err
+ }
+ if err := write(data); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Create output...
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
+ }
+ }
+
+ // Copy input.
+ inbuf := w.buffers.Get().([]byte)[:4]
+ copy(inbuf, header[:])
+ inbuf = append(inbuf, data...)
+
+ output := make(chan result, 1)
+ // Queue output.
+ w.output <- output
+ output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+ return nil
+}
+
+// EncodeBuffer will add a buffer to the stream.
+// This is the fastest way to encode a stream,
+// but the input buffer cannot be written to by the caller
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
+//
+// Note that input is not buffered.
+// This means that each write will result in discrete blocks being created.
+// For buffered writes, use the regular Write function.
+func (w *Writer) EncodeBuffer(buf []byte) (err error) {
+ if err := w.err(nil); err != nil {
+ return err
+ }
+
+ if w.flushOnWrite {
+ _, err := w.write(buf)
+ return err
+ }
+ // Flush queued data first.
+ if len(w.ibuf) > 0 {
+ err := w.Flush()
+ if err != nil {
+ return err
+ }
+ }
+ if w.concurrency == 1 {
+ _, err := w.writeSync(buf)
+ return err
+ }
+
+ // Spawn goroutine and write block to output channel.
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ hWriter := make(chan result)
+ w.output <- hWriter
+ if w.snappy {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
+ } else {
+ hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
+ }
+ }
+
+ for len(buf) > 0 {
+ // Cut input.
+ uncompressed := buf
+ if len(uncompressed) > w.blockSize {
+ uncompressed = uncompressed[:w.blockSize]
+ }
+ buf = buf[len(uncompressed):]
+ // Get an output buffer.
+ obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
+ output := make(chan result)
+ // Queue output now, so we keep order.
+ w.output <- output
+ res := result{
+ startOffset: w.uncompWritten,
+ }
+ w.uncompWritten += int64(len(uncompressed))
+ go func() {
+ checksum := crc(uncompressed)
+
+ // Set to uncompressed.
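+ // Framing reminder: a chunk is 1 byte of type plus a 3-byte
+ // little-endian length, and data chunks carry a 4-byte CRC of the
+ // uncompressed payload before the body, which is why chunkLen below
+ // counts the CRC plus the payload.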
+ chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. + // If the block is incompressible, this is used for the result. + inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. 
+ obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + return nil +} + +func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + var n int + var err error + if w.snappy { + n, err = w.writer.Write([]byte(magicChunkSnappy)) + } else { + n, err = w.writer.Write([]byte(magicChunk)) + } + if err != nil { + return 0, w.err(err) + } + if n != len(magicChunk) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + + for len(p) > 0 { + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + obuf := w.buffers.Get().([]byte)[:w.obufLen] + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. 
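+ // A compressed chunk body is itself an s2 block: the varint-encoded
+ // uncompressed length written here, followed by the encoded data.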
+ n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + obuf = obuf[:8] + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + n, err := w.writer.Write(obuf) + if err != nil { + return 0, w.err(err) + } + if n != len(obuf) { + return 0, w.err(io.ErrShortWrite) + } + w.err(w.index.add(w.written, w.uncompWritten)) + w.written += int64(n) + w.uncompWritten += int64(len(uncompressed)) + + if chunkType == chunkTypeUncompressedData { + // Write uncompressed data. + n, err := w.writer.Write(uncompressed) + if err != nil { + return 0, w.err(err) + } + if n != len(uncompressed) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + w.buffers.Put(obuf) + // Queue final output. + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +// This does not apply padding. +func (w *Writer) Flush() error { + if err := w.err(nil); err != nil { + return err + } + + // Queue any data still in input buffer. + if len(w.ibuf) != 0 { + if !w.wroteStreamHeader { + _, err := w.writeSync(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err(err) + } else { + _, err := w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + err = w.err(err) + if err != nil { + return err + } + } + } + if w.output == nil { + return w.err(nil) + } + + // Send empty buffer + res := make(chan result) + w.output <- res + // Block until this has been picked up. + res <- result{b: nil, startOffset: w.uncompWritten} + // When it is closed, we have flushed. + <-res + return w.err(nil) +} + +// Close calls Flush and then closes the Writer. +// Calling Close multiple times is ok, +// but calling CloseIndex after this will make it not return the index. +func (w *Writer) Close() error { + _, err := w.closeIndex(w.appendIndex) + return err +} + +// CloseIndex calls Close and returns an index on first call. +// This is not required if you are only adding index to a stream. +func (w *Writer) CloseIndex() ([]byte, error) { + return w.closeIndex(true) +} + +func (w *Writer) closeIndex(idx bool) ([]byte, error) { + err := w.Flush() + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + + var index []byte + if w.err(nil) == nil && w.writer != nil { + // Create index. + if idx { + compSize := int64(-1) + if w.pad <= 1 { + compSize = w.written + } + index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize) + // Count as written for padding. + if w.appendIndex { + w.written += int64(len(index)) + } + if true { + _, err := w.index.Load(index) + if err != nil { + panic(err) + } + } + } + + if w.pad > 1 { + tmp := w.ibuf[:0] + if len(index) > 0 { + // Allocate another buffer. 
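+ // The index slice built above aliases w.ibuf, so the padding
+ // frame needs its own scratch buffer.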
+ tmp = w.buffers.Get().([]byte)[:0]
+ defer w.buffers.Put(tmp)
+ }
+ add := calcSkippableFrame(w.written, int64(w.pad))
+ frame, err := skippableFrame(tmp, add, w.randSrc)
+ if err = w.err(err); err != nil {
+ return nil, err
+ }
+ n, err2 := w.writer.Write(frame)
+ if err2 == nil && n != len(frame) {
+ err2 = io.ErrShortWrite
+ }
+ _ = w.err(err2)
+ }
+ if len(index) > 0 && w.appendIndex {
+ n, err2 := w.writer.Write(index)
+ if err2 == nil && n != len(index) {
+ err2 = io.ErrShortWrite
+ }
+ _ = w.err(err2)
+ }
+ }
+ err = w.err(errClosed)
+ if err == errClosed {
+ return index, nil
+ }
+ return nil, err
+}
+
+// calcSkippableFrame will return the total size to be added so that written
+// becomes divisible by wantMultiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+ }
+ if int64(total) >= maxBlockSize+skippableFrameHeader {
+ return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+ }
+ // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+ dst = append(dst, chunkTypePadding)
+ f := uint32(total - skippableFrameHeader)
+ // Add chunk length.
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+ // Add data
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WriterConcurrency(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return errors.New("concurrency must be at least 1")
+ }
+ w.concurrency = n
+ return nil
+ }
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+ return func(w *Writer) error {
+ w.appendIndex = true
+ return nil
+ }
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelBetter
+ return nil
+ }
+}
+
+// WriterBestCompression will enable the best compression.
+// EncodeBest compresses better than Encode but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelBest
+ return nil
+ }
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC calculation and output will still be done asynchronously.
+func WriterUncompressed() WriterOption {
+ return func(w *Writer) error {
+ w.level = levelUncompressed
+ return nil
+ }
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+ return func(w *Writer) error {
+ if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+ return errors.New("s2: block size too large. Must be <= 64K and >= 4KB for snappy compatible output")
+ }
+ if n > maxBlockSize || n < minBlockSize {
+ return errors.New("s2: block size too large. Must be <= 4MB and >= 4KB")
+ }
+ w.blockSize = n
+ return nil
+ }
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+ return func(w *Writer) error {
+ if n <= 0 {
+ return fmt.Errorf("s2: padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ w.pad = 0
+ }
+ if n > maxBlockSize {
+ return fmt.Errorf("s2: padding must be less than 4MB")
+ }
+ w.pad = n
+ return nil
+ }
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+ return func(w *Writer) error {
+ w.randSrc = reader
+ return nil
+ }
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the block size is bigger than 64KB it is reduced to fit.
+func WriterSnappyCompat() WriterOption {
+ return func(w *Writer) error {
+ w.snappy = true
+ if w.blockSize > 64<<10 {
+ // We choose 8 bytes less than 64K, since that will make literal emits slightly more effective,
+ // and allows us to skip some size checks.
+ w.blockSize = (64 << 10) - 8
+ }
+ return nil
+ }
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block size will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed
+// when Write calls return; otherwise blocks will be written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+ return func(w *Writer) error {
+ w.flushOnWrite = true
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
new file mode 100644
index 00000000..8b16c38a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -0,0 +1,456 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
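Before the encoder internals below: the match tables in this file are keyed by hash6, a multiplicative hash of the lowest 6 bytes of a 64-bit little-endian load. An editorial, self-contained sketch of the masking-by-shift trick it relies on:

package main

import (
	"encoding/binary"
	"fmt"
)

// Same construction as hash6 below: shifting the 64-bit word left by 16
// drops the two most significant bytes, so only the low 6 bytes are hashed.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}

func main() {
	u1 := binary.LittleEndian.Uint64([]byte("abcdefXY"))
	u2 := binary.LittleEndian.Uint64([]byte("abcdefZZ"))
	// Prints true: the trailing bytes differ but are shifted out.
	fmt.Println(hash6(u1, 14) == hash6(u2, 14))
}
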
+ +package s2 + +import ( + "bytes" + "encoding/binary" + "math/bits" +) + +func load32(b []byte, i int) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockGo(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. 
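+					// The match is re-derived below and compared byte-for-byte
+					// against the bytes `repeat` positions earlier; a mismatch
+					// means the repeat offset bookkeeping above is broken.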
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. 
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. 
+ a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 00000000..e612225f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,142 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) >= 4<<20 { + return encodeBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockAsm8B(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetter(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) > 4<<20 { + return encodeBetterBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBetterBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBetterBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBetterBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBetterBlockAsm8B(dst, src) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockSnappy(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... 
+		limit10B = 4 << 10
+		// Use 8 bit table when less than...
+		limit8B = 512
+	)
+	if len(src) >= 64<<10 {
+		return encodeSnappyBlockAsm(dst, src)
+	}
+	if len(src) >= limit12B {
+		return encodeSnappyBlockAsm64K(dst, src)
+	}
+	if len(src) >= limit10B {
+		return encodeSnappyBlockAsm12B(dst, src)
+	}
+	if len(src) >= limit8B {
+		return encodeSnappyBlockAsm10B(dst, src)
+	}
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeSnappyBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+	const (
+		// Use 12 bit table when less than...
+		limit12B = 16 << 10
+		// Use 10 bit table when less than...
+		limit10B = 4 << 10
+		// Use 8 bit table when less than...
+		limit8B = 512
+	)
+	if len(src) >= 64<<10 {
+		return encodeSnappyBetterBlockAsm(dst, src)
+	}
+	if len(src) >= limit12B {
+		return encodeSnappyBetterBlockAsm64K(dst, src)
+	}
+	if len(src) >= limit10B {
+		return encodeSnappyBetterBlockAsm12B(dst, src)
+	}
+	if len(src) >= limit8B {
+		return encodeSnappyBetterBlockAsm10B(dst, src)
+	}
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeSnappyBetterBlockAsm8B(dst, src)
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
new file mode 100644
index 00000000..44803477
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -0,0 +1,604 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBest(dst, src []byte) (d int) {
+	// Initialize the hash tables.
+	const (
+		// Long hash matches.
+		lTableBits    = 19
+		maxLTableSize = 1 << lTableBits
+
+		// Short hash matches.
+		sTableBits    = 16
+		maxSTableSize = 1 << sTableBits
+
+		inputMargin = 8 + 2
+	)
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+
+	var lTable [maxLTableSize]uint64
+	var sTable [maxSTableSize]uint64
+
+	// Bail if we can't compress to at least this.
+	dstLimit := len(src) - 5
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
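+	// Each lTable/sTable entry packs two candidate positions into one uint64:
+	// the most recent match position in the low 32 bits and the previous one
+	// in the high 32 bits (unpacked by getCur/getPrev below), so every hash
+	// bucket can offer two candidates.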
+ s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + } + // Search for a match at best match end, see if that is better. 
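+				// (The long table is probed at the position where the current
+				// best match ends; subtracting the match length aligns a hit
+				// there with best.s, possibly yielding a longer match over the
+				// same span.)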
+ if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], offset, best.length) + } + } else { + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. 
+ if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. 
+// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 00000000..943215b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,431 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
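+	// "Better" uses two tables: hash7 into a 16-bit long table and hash4 into
+	// a 14-bit short table. The long hash produces fewer collisions for long
+	// matches, while the short table still catches nearby 4-byte matches.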
+ s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + // Check repeat at offset checkRep. + const checkRep = 1 + if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + // Index match start+1 (long) and start+2 (short) + index0 := base + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + cv = load64(src, s) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) + lTable[hash7(cv1, lTableBits)] = uint32(index1) + lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = (s-nextEmit)>>7 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. 
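+		// Compare 8 bytes at a time via XOR; bits.TrailingZeros64(diff)>>3
+		// converts the first differing bit into a count of matching bytes.
+		// The byte-wise branch handles the last <8 bytes of src.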
+		s += 4
+		candidateL += 4
+		for s < len(src) {
+			if len(src)-s < 8 {
+				if src[s] == src[candidateL] {
+					s++
+					candidateL++
+					continue
+				}
+				break
+			}
+			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+				s += bits.TrailingZeros64(diff) >> 3
+				break
+			}
+			s += 8
+			candidateL += 8
+		}
+
+		if offset > 65535 && s-base <= 5 && repeat != offset {
+			// Bail if the match is equal or worse to the encoding.
+			s = nextS + 1
+			if s >= sLimit {
+				goto emitRemainder
+			}
+			cv = load64(src, s)
+			continue
+		}
+
+		d += emitLiteral(dst[d:], src[nextEmit:base])
+		d += emitCopyNoRepeat(dst[d:], offset, s-base)
+		repeat = offset
+
+		nextEmit = s
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		if d > dstLimit {
+			// Do we have space for more, if not bail.
+			return 0
+		}
+		// Index match start+1 (long) and start+2 (short)
+		index0 := base + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load64(src, index0)
+		cv1 := load64(src, index1)
+		cv = load64(src, s)
+		lTable[hash7(cv0, lTableBits)] = uint32(index0)
+		lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
+		lTable[hash7(cv1, lTableBits)] = uint32(index1)
+		lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
+		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+		sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
+		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		// Bail if we exceed the maximum size.
+		if d+len(src)-nextEmit > dstLimit {
+			return 0
+		}
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
new file mode 100644
index 00000000..43d43534
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -0,0 +1,298 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package s2
+
+import (
+	"math/bits"
+)
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlock(dst, src []byte) (d int) {
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeBlockGo(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetter(dst, src []byte) (d int) {
+	return encodeBlockBetterGo(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+	return encodeBlockBetterSnappyGo(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src))
+func encodeBlockSnappy(dst, src []byte) (d int) {
+	if len(src) < minNonLiteralBlockSize {
+		return 0
+	}
+	return encodeBlockSnappyGo(dst, src)
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
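+	// Worked example (not from the source): offset=5, length=7 encodes as
+	// dst[0] = 0<<5 | (7-4)<<2 | tagCopy1 = 0x0d and dst[1] = 5, i.e. a
+	// 2-byte tagCopy1 with 3 offset bits in the tag byte and 8 more in dst[1].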
+	dst[1] = uint8(offset)
+	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	return 2
+}
+
+// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= math.MaxUint32
+//	4 <= length && length <= 1 << 24
+func emitCopyNoRepeat(dst []byte, offset, length int) int {
+	if offset >= 65536 {
+		i := 0
+		if length > 64 {
+			// Emit a length 64 copy, encoded as 5 bytes.
+			dst[4] = uint8(offset >> 24)
+			dst[3] = uint8(offset >> 16)
+			dst[2] = uint8(offset >> 8)
+			dst[1] = uint8(offset)
+			dst[0] = 63<<2 | tagCopy4
+			length -= 64
+			if length >= 4 {
+				// Emit remaining as a further copy (no repeat codes here).
+				return 5 + emitCopyNoRepeat(dst[5:], offset, length)
+			}
+			i = 5
+		}
+		if length == 0 {
+			return i
+		}
+		// Emit a copy, offset encoded as 4 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy4
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		dst[i+3] = uint8(offset >> 16)
+		dst[i+4] = uint8(offset >> 24)
+		return i + 5
+	}
+
+	// Offset no more than 2 bytes.
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		// Emit remaining as an additional copy (minimum 4 bytes).
+		dst[2] = uint8(offset >> 8)
+		dst[1] = uint8(offset)
+		dst[0] = 59<<2 | tagCopy2
+		length -= 60
+		// Emit remaining as additional copies, at least 4 bytes remain.
+		return 3 + emitCopyNoRepeat(dst[3:], offset, length)
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[2] = uint8(offset >> 8)
+		dst[1] = uint8(offset)
+		dst[0] = uint8(length-1)<<2 | tagCopy2
+		return 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[1] = uint8(offset)
+	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	return 2
+}
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//	len(a) <= len(b)
+//
+func matchLen(a []byte, b []byte) int {
+	b = b[:len(a)]
+	var checked int
+	if len(a) > 4 {
+		// Try 4 bytes first
+		if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+			return bits.TrailingZeros32(diff) >> 3
+		}
+		// Switch to 8 byte matching.
+		checked = 4
+		a = a[4:]
+		b = b[4:]
+		for len(a) >= 8 {
+			b = b[:len(a)]
+			if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+				return checked + (bits.TrailingZeros64(diff) >> 3)
+			}
+			checked += 8
+			a = a[8:]
+			b = b[8:]
+		}
+	}
+	b = b[:len(a)]
+	for i := range a {
+		if a[i] != b[i] {
+			return i + checked
+		}
+	}
+	return len(a) + checked
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
new file mode 100644
index 00000000..d9312e5b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -0,0 +1,189 @@
+// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
+
+package s2
+
+// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func encodeBlockAsm(dst []byte, src []byte) int
+
+// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
+// Maximum input 4194304 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
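+// (The per-size variants (Asm, Asm4MB, Asm12B, Asm10B, Asm8B) are generated
+// specializations that differ only in their maximum input size and internal
+// table sizes; see the dispatch in encode_amd64.go.)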
+// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
+// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 00000000..729dbf53 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,16701 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +#include "textflag.h" + +// func encodeBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + 
ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_match4_repeat_extend_encodeBlockAsm + +matchlen_loopback_repeat_extend_encodeBlockAsm: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + 
JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_loop_repeat_extend_encodeBlockAsm: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_match4_repeat_extend_encodeBlockAsm: + CMPL R9, $0x04 + JL matchlen_match2_repeat_extend_encodeBlockAsm + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm + SUBL $0x04, R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm: + CMPL R9, $0x02 + JL matchlen_match1_repeat_extend_encodeBlockAsm + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm + SUBL $0x02, R9 + LEAL 2(R12), R12 + +matchlen_match1_repeat_extend_encodeBlockAsm: + CMPL R9, $0x01 + JL repeat_extend_forward_end_encodeBlockAsm + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm + CMPL SI, $0x00010100 + JLT repeat_four_match_repeat_encodeBlockAsm + CMPL SI, $0x0100ffff + JLT repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + 
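The inlined emitRepeat above picks one of four repeat encodings from the raw length, the biased length (length-4), and the offset. A rough Go sketch of that branch structure follows; the thresholds are the comparison constants visible in the assembly ($0x08, $0x0c, $0x800, $0x104, $0x10100, $0x0100ffff), but the helper name and the size-only return are illustrative, not the generator's actual emitRepeat implementation.

// repeatChunkSize mirrors the size-class selection of the inlined emitRepeat.
// Per the stub contract, length is at least 4.
func repeatChunkSize(offset, length int) int {
	switch {
	case length <= 8, length < 12 && offset < 2048:
		return 2 // repeat_two / repeat_two_offset (offset packed into the tag)
	case length-4 < 0x104:
		return 3 // repeat_three, tag byte 0x15
	case length-4 < 0x10100:
		return 4 // repeat_four, tag byte 0x19
	case length-4 < 0x0100ffff:
		return 5 // repeat_five, tag byte 0x1d
	default:
		// Maximal 5-byte chunk, then continue with the remainder: this is the
		// LEAL -16842747(SI), SI (0x0100fffb) plus the emit_repeat_again_* backedge.
		return 5 + repeatChunkSize(offset, length-4-0x0100fffb)
	}
}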
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x0100ffff + JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x0100ffff + JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short + LEAL -16842747(SI), SI + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + 
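The surrounding emitCopy code splits on the same constants as the emitCopy stub earlier in this diff: a 5-byte chunk when the offset needs 32 bits, a 3-byte chunk for 16-bit offsets, and a 2-byte chunk for short near copies. A hedged Go sketch of that classification (helper name and size-only return are illustrative):

// copyChunkSize classifies a single copy the way the labels above do.
func copyChunkSize(offset, length int) int {
	if offset >= 1<<16 {
		return 5 // four_bytes_*: 32-bit offset; tag 0xff encodes a full 64-byte length
	}
	if length > 64 {
		return 3 // two_byte_offset_*: emit 60 bytes (tag 0xee), rest via emitRepeat
	}
	if length < 12 && offset < 2048 {
		return 2 // two_byte_offset_short_*: offset < $0x800 and length < $0x0c
	}
	return 3 // emit_copy_three_*: tag (length-1)<<2|0x02 plus a 16-bit offset
}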
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +emit_copy_three_repeat_as_copy_encodeBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm + +no_repeat_found_encodeBlockAsm: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm + +candidate3_match_encodeBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm + +candidate2_match_encodeBlockAsm: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm + +match_extend_back_loop_encodeBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm + JMP match_extend_back_loop_encodeBlockAsm + +match_extend_back_end_encodeBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x01000000 + JLT four_bytes_match_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBlockAsm + +four_bytes_match_emit_encodeBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm + +three_bytes_match_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm + +two_bytes_match_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm + JMP memmove_long_match_emit_encodeBlockAsm + +one_byte_match_emit_encodeBlockAsm: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE 
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm + +memmove_long_match_emit_encodeBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm: +match_nolit_loop_encodeBlockAsm: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeBlockAsm + +matchlen_loopback_match_nolit_encodeBlockAsm: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_loop_match_nolit_encodeBlockAsm: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm + JZ match_nolit_end_encodeBlockAsm + +matchlen_match4_match_nolit_encodeBlockAsm: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), 
R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeBlockAsm: + CMPL DI, $0x01 + JL match_nolit_end_encodeBlockAsm + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeBlockAsm + +four_bytes_loop_back_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x0100ffff + JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy + LEAL -16842747(R10), R10 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBlockAsm_emit_copy: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeBlockAsm + +four_bytes_remain_match_nolit_encodeBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + CMPL R10, $0x00000104 + JLT 
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x0100ffff + JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short + LEAL -16842747(R10), R10 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + JMP two_byte_offset_match_nolit_encodeBlockAsm + +two_byte_offset_short_match_nolit_encodeBlockAsm: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +emit_copy_three_match_nolit_encodeBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm + INCL CX + JMP search_loop_encodeBlockAsm + +emit_remainder_encodeBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +four_bytes_emit_remainder_encodeBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + 
ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +three_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +two_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm + JMP memmove_long_emit_remainder_encodeBlockAsm + +one_byte_emit_remainder_encodeBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm + +memmove_long_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm4MB(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm4MB(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, 
DX + DECQ CX + JNZ zero_loop_encodeBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm4MB + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm4MB + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm4MB + +repeat_extend_back_loop_encodeBlockAsm4MB: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm4MB + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm4MB + +repeat_extend_back_end_encodeBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +three_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +two_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm4MB + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +one_byte_repeat_emit_encodeBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU 
(R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB + +memmove_long_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm4MB: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_match4_repeat_extend_encodeBlockAsm4MB + +matchlen_loopback_repeat_extend_encodeBlockAsm4MB: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_loop_repeat_extend_encodeBlockAsm4MB: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB + JZ repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match4_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x04 + JL matchlen_match2_repeat_extend_encodeBlockAsm4MB + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB + SUBL $0x04, R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x02 + JL matchlen_match1_repeat_extend_encodeBlockAsm4MB + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB + SUBL $0x02, R9 + LEAL 2(R12), R12 + +matchlen_match1_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x01 + JL repeat_extend_forward_end_encodeBlockAsm4MB + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm4MB + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm4MB: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm4MB + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB + CMPL DI, 
$0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm4MB + +cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm4MB + CMPL SI, $0x00010100 + JLT repeat_four_match_repeat_encodeBlockAsm4MB + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_match_repeat_encodeBlockAsm4MB: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_match_repeat_encodeBlockAsm4MB: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_match_repeat_encodeBlockAsm4MB: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_match_repeat_encodeBlockAsm4MB: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_as_copy_encodeBlockAsm4MB: + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + +four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB + +four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI 
+ CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00010100 + JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (AX) + MOVW SI, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm4MB: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm4MB + +no_repeat_found_encodeBlockAsm4MB: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm4MB + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm4MB + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm4MB + +candidate3_match_encodeBlockAsm4MB: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm4MB + +candidate2_match_encodeBlockAsm4MB: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm4MB + +match_extend_back_loop_encodeBlockAsm4MB: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm4MB + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm4MB + JMP match_extend_back_loop_encodeBlockAsm4MB + +match_extend_back_end_encodeBlockAsm4MB: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 4(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm4MB: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI 
+ SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeBlockAsm4MB + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +three_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +two_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm4MB + JMP memmove_long_match_emit_encodeBlockAsm4MB + +one_byte_match_emit_encodeBlockAsm4MB: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm4MB: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm4MB + +memmove_long_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm4MB: +match_nolit_loop_encodeBlockAsm4MB: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ 
(DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeBlockAsm4MB + +matchlen_loopback_match_nolit_encodeBlockAsm4MB: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm4MB + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_loop_match_nolit_encodeBlockAsm4MB: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB + JZ match_nolit_end_encodeBlockAsm4MB + +matchlen_match4_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeBlockAsm4MB + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm4MB + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeBlockAsm4MB + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm4MB + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x01 + JL match_nolit_end_encodeBlockAsm4MB + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm4MB + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm4MB: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeBlockAsm4MB + +four_bytes_loop_back_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeBlockAsm4MB + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + JMP four_bytes_loop_back_match_nolit_encodeBlockAsm4MB + 
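The matchLen block above XORs the inputs eight bytes at a time and, via the GOAMD64_v3/v4 conditional blocks, uses either TZCNTQ or BSFQ to turn the first differing bit into a byte count (the SARQ $0x03). The pure-Go equivalent of the same technique, assuming the stub contract len(a) <= len(b):

import (
	"encoding/binary"
	"math/bits"
)

// matchLen counts leading matching bytes: XOR 64-bit words, and on the first
// nonzero difference convert trailing zero bits to whole bytes (>>3), then
// finish byte-wise as the matchlen_match4/match2/match1 ladder does.
func matchLen(a, b []byte) (n int) {
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}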
+four_bytes_remain_match_nolit_encodeBlockAsm4MB: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm4MB + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + JMP two_byte_offset_match_nolit_encodeBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +emit_copy_three_match_nolit_encodeBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm4MB: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm4MB + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm4MB: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm4MB + INCL CX + JMP search_loop_encodeBlockAsm4MB + +emit_remainder_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ 
emit_literal_done_emit_remainder_encodeBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +three_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +two_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +one_byte_emit_remainder_encodeBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB + +memmove_long_emit_remainder_encodeBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + 
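The remainder path above writes the Snappy-style literal length header: the length is stored biased by one, packed into the top six bits of the tag byte when it is under 60, and otherwise emitted as tag values 60..62 followed by 1..3 little-endian length bytes. That is exactly what the $0x3c/$0x100/$0x10000 comparisons and the 0xf0/0xf4/0xf8 tag bytes encode; the helper below is an illustrative sketch, not the generator's code.

import "encoding/binary"

// emitLiteralHeader writes the literal tag as the one_byte_* / two_bytes_* /
// three_bytes_* labels above do, covering lengths up to this 4MB variant's cap.
func emitLiteralHeader(dst []byte, litLen int) int {
	n := uint32(litLen - 1) // length stored biased by one
	switch {
	case n < 60:
		dst[0] = uint8(n) << 2
		return 1
	case n < 1<<8:
		dst[0], dst[1] = 60<<2, uint8(n) // tag 0xf0
		return 2
	case n < 1<<16:
		dst[0] = 61 << 2 // tag 0xf4
		binary.LittleEndian.PutUint16(dst[1:], uint16(n))
		return 3
	default: // n < 1<<24 for the 4MB variant
		dst[0] = 62 << 2 // tag 0xf8
		dst[1], dst[2], dst[3] = uint8(n), uint8(n>>8), uint8(n>>16)
		return 4
	}
}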
+emit_literal_done_emit_remainder_encodeBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm12B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm12B + +repeat_extend_back_loop_encodeBlockAsm12B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm12B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm12B + +repeat_extend_back_end_encodeBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +two_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm12B + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +one_byte_repeat_emit_encodeBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + 
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm12B + +memmove_long_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm12B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_match4_repeat_extend_encodeBlockAsm12B + +matchlen_loopback_repeat_extend_encodeBlockAsm12B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_loop_repeat_extend_encodeBlockAsm12B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B + JZ repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match4_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x04 + JL matchlen_match2_repeat_extend_encodeBlockAsm12B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm12B + SUBL $0x04, R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x02 + JL matchlen_match1_repeat_extend_encodeBlockAsm12B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm12B + SUBL $0x02, R9 + LEAL 2(R12), R12 + +matchlen_match1_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x01 + JL repeat_extend_forward_end_encodeBlockAsm12B + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm12B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm12B: + ADDL R12, CX + MOVL 
CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm12B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm12B + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm12B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL 
(DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm12B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + 
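+// Big copy loop: 32 bytes per iteration, unaligned loads from the source
+// and aligned stores to the 32-byte-aligned destination.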
+emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeBlockAsm12B + +matchlen_loopback_match_nolit_encodeBlockAsm12B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_loop_match_nolit_encodeBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm12B + JZ match_nolit_end_encodeBlockAsm12B + +matchlen_match4_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm12B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm12B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x01 + JL match_nolit_end_encodeBlockAsm12B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm12B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm12B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + 
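+// Shortest repeat form: (length<<2)|0x01 emitted as a single 16-bit word.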
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + JMP two_byte_offset_match_nolit_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm12B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL CX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + 
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB 
BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm10B + +repeat_extend_back_end_encodeBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + 
SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_match4_repeat_extend_encodeBlockAsm10B + +matchlen_loopback_repeat_extend_encodeBlockAsm10B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_loop_repeat_extend_encodeBlockAsm10B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match4_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x04 + JL matchlen_match2_repeat_extend_encodeBlockAsm10B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm10B + SUBL $0x04, R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x02 + JL matchlen_match1_repeat_extend_encodeBlockAsm10B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm10B + SUBL $0x02, R9 + LEAL 2(R12), R12 + +matchlen_match1_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x01 + JL repeat_extend_forward_end_encodeBlockAsm10B + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm10B + CMPL R8, $0x0c + JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL DI, $0x00000800 + JLT repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm10B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL SI, $0x00000104 + JLT 
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm10B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE 
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeBlockAsm10B + +matchlen_loopback_match_nolit_encodeBlockAsm10B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_loop_match_nolit_encodeBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm10B + JZ match_nolit_end_encodeBlockAsm10B + +matchlen_match4_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeBlockAsm10B + MOVL (R8)(R10*1), R9 + 
CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm10B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm10B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x01 + JL match_nolit_end_encodeBlockAsm10B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm10B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm10B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + JMP two_byte_offset_match_nolit_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm10B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL CX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + 
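+// Destination bounds check passed; emit everything after the last match as
+// plain literals.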
+emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte) int 
+// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(CX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL DI, R8 + JLE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + 
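+// 33-64 byte literal copies use four overlapping 16-byte SSE moves.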
+emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL CX, R9 + LEAQ (DX)(CX*1), R10 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R12, R12 + CMPL R9, $0x08 + JL matchlen_match4_repeat_extend_encodeBlockAsm8B + +matchlen_loopback_repeat_extend_encodeBlockAsm8B: + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_repeat_extend_encodeBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_loop_repeat_extend_encodeBlockAsm8B: + LEAL -8(R9), R9 + LEAL 8(R12), R12 + CMPL R9, $0x08 + JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match4_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x04 + JL matchlen_match2_repeat_extend_encodeBlockAsm8B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm8B + SUBL $0x04, R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x02 + JL matchlen_match1_repeat_extend_encodeBlockAsm8B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm8B + SUBL $0x02, R9 + LEAL 2(R12), R12 + +matchlen_match1_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x01 + JL repeat_extend_forward_end_encodeBlockAsm8B + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R12, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JLE repeat_two_match_repeat_encodeBlockAsm8B + CMPL DI, $0x0c + JGE 
cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL SI, $0x00000104 + JLT repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy +two_byte_offset_repeat_as_copy_encodeBlockAsm8B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL DI, $0x0c + JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL SI, $0x00000104 + JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (AX) + MOVW SI, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (AX) + MOVB SI, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + JMP two_byte_offset_repeat_as_copy_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm8B + +no_repeat_found_encodeBlockAsm8B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(CX), CX + DECL SI + 
JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeBlockAsm8B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + 
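+// Literal flush complete; keep extending matches that follow back-to-back
+// without intervening literals.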
+emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeBlockAsm8B + +matchlen_loopback_match_nolit_encodeBlockAsm8B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_loop_match_nolit_encodeBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeBlockAsm8B + JZ match_nolit_end_encodeBlockAsm8B + +matchlen_match4_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm8B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm8B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x01 + JL match_nolit_end_encodeBlockAsm8B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm8B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm8B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBlockAsm8B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat + MOVL R10, SI + LEAL -4(R10), R10 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + JMP two_byte_offset_match_nolit_encodeBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBlockAsm8B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +emit_copy_three_match_nolit_encodeBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB 
R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBlockAsm8B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm8B + INCL CX + JMP search_loop_encodeBlockAsm8B + +emit_remainder_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +two_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBlockAsm8B + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +one_byte_emit_remainder_encodeBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm8B + +memmove_long_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + 
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeBetterBlockAsm + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm + +check_maxskip_ok_encodeBetterBlockAsm: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm: + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm + +candidateS_match_encodeBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm + +match_extend_back_loop_encodeBetterBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm + JMP match_extend_back_loop_encodeBetterBlockAsm + +match_extend_back_end_encodeBetterBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 
+ + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeBetterBlockAsm + +matchlen_loopback_match_nolit_encodeBetterBlockAsm: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_loop_match_nolit_encodeBetterBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm + JZ match_nolit_end_encodeBetterBlockAsm + +matchlen_match4_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeBetterBlockAsm + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeBetterBlockAsm + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x01 + JL match_nolit_end_encodeBetterBlockAsm + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm + CMPL R12, $0x01 + JG match_length_ok_encodeBetterBlockAsm + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm + +match_length_ok_encodeBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +four_bytes_match_emit_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +three_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +two_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm + JMP memmove_long_match_emit_encodeBetterBlockAsm + +one_byte_match_emit_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, 
$0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm + +memmove_long_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeBetterBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x00010100 + JLT 
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x0100ffff + JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm + +four_bytes_remain_match_nolit_encodeBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x0100ffff + JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: + SHLL $0x02, R12 + ORL 
$0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +emit_copy_three_match_nolit_encodeBetterBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +match_is_repeat_encodeBetterBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_repeat_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +four_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +three_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +two_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +one_byte_match_emit_repeat_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + +memmove_long_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat +emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x0100ffff + JLT repeat_five_match_nolit_repeat_encodeBetterBlockAsm + LEAL -16842747(R12), R12 + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm + +repeat_five_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW 
R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm + +emit_remainder_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +four_bytes_emit_remainder_encodeBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +three_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +two_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +one_byte_emit_remainder_encodeBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm + +memmove_long_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm4MB(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeBetterBlockAsm4MB + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm4MB + 
+check_maxskip_ok_encodeBetterBlockAsm4MB: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm4MB: + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm4MB + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB + +candidateS_match_encodeBetterBlockAsm4MB: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm4MB + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + +match_extend_back_loop_encodeBetterBlockAsm4MB: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm4MB + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm4MB + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + JMP match_extend_back_loop_encodeBetterBlockAsm4MB + +match_extend_back_end_encodeBetterBlockAsm4MB: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 4(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm4MB: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + +matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_loop_match_nolit_encodeBetterBlockAsm4MB: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB + JZ match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x01 + JL match_nolit_end_encodeBetterBlockAsm4MB + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm4MB + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm4MB: + MOVL CX, R8 + SUBL SI, R8 + + // Check if 
repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x01 + JG match_length_ok_encodeBetterBlockAsm4MB + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm4MB + +match_length_ok_encodeBetterBlockAsm4MB: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm4MB + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +three_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +two_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +one_byte_match_emit_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB + +memmove_long_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + 
LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm4MB: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + +four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL 
SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R12, $0x00010100 + JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +match_is_repeat_encodeBetterBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + +memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x00010100 + JLT 
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (AX) + MOVW R12, 2(AX) + SARL $0x10, R8 + MOVB R8, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm4MB + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm4MB: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm4MB + +emit_remainder_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeBetterBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +three_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +two_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +one_byte_emit_remainder_encodeBetterBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ 
$0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + +memmove_long_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + 
MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 65560(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 65560(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B + +candidateS_match_encodeBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm12B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + +match_extend_back_loop_encodeBetterBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + JMP match_extend_back_loop_encodeBetterBlockAsm12B + +match_extend_back_end_encodeBetterBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm12B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm12B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeBetterBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + SUBL $0x02, R8 + 
LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x01 + JL match_nolit_end_encodeBetterBlockAsm12B + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + 
MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm12B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL 
memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE 
repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm12B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x34, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 65560(SP)(R11*4) + MOVL R15, 65560(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 65560(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + 
LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 16408(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 16408(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm10B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeBetterBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x01 + JL match_nolit_end_encodeBetterBlockAsm10B + MOVB 
(R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA 
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm10B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + 
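+ // NB (reviewer annotation, not generator output): the branches above
+ // select a literal-header size. Assuming the standard Snappy/S2 literal
+ // tag layout, a run of n literal bytes is encoded as (n-1)<<2 in a
+ // single tag byte when n-1 < 60 (the SHLB $0x02 path below), as tag
+ // 0xf0 plus one length byte when n-1 < 256, and as tag 0xf4 plus a
+ // two-byte little-endian length otherwise, matching the immediates
+ // used in this section.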
+one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x0c + JGE 
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm10B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x36, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 16408(SP)(R11*4) + MOVL R15, 16408(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 16408(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ BX, $0x08 + JB 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), 
DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 4120(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 4120(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL CX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeBetterBlockAsm8B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm8B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeBetterBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x01 + JL match_nolit_end_encodeBetterBlockAsm8B + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R12), R12 + 
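+ // NB (reviewer annotation, not generator output): the matchLen block
+ // above compares the current and candidate positions eight bytes at a
+ // time; when XORQ finds a difference, TZCNTQ (when built for
+ // GOAMD64_v3/v4) or BSFQ locates the first mismatching bit, and
+ // SARQ $0x03 converts that bit index into a byte count. The 4/2/1-byte
+ // tails then extend the match over the remaining length.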
+match_nolit_end_encodeBetterBlockAsm8B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (AX) + MOVL R10, -4(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU 
-16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeBetterBlockAsm8B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + JMP two_byte_offset_match_nolit_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeBetterBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x04 + JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R11 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x0c + JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R12, $0x00000104 + JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R12), R12 + MOVW $0x0019, (AX) + MOVW R12, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R12), R12 + MOVW $0x0015, (AX) + MOVB R12, 2(AX) + ADDQ $0x03, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(AX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeBetterBlockAsm8B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x38, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 4120(SP)(R11*4) + MOVL R15, 4120(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 4120(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x04 + JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (CX), SI + MOVL SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP 
memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), 
R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL SI, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA 
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x04 + JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm + SUBL $0x04, R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x02 + JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + SUBL $0x02, R8 + LEAL 2(R11), R11 + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x01 + JL repeat_extend_forward_end_encodeSnappyBlockAsm + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JL two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JLE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(SI), SI + ADDQ $0x05, AX + CMPL SI, $0x04 + JL four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeSnappyBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JLE 
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + +no_repeat_found_encodeSnappyBlockAsm: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00010000 + JLT three_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x01000000 + JLT four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW R8, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + 
CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBlockAsm: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm + JZ match_nolit_end_encodeSnappyBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBlockAsm 
+ MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x01 + JL match_nolit_end_encodeSnappyBlockAsm + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JLE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(R10), R10 + ADDQ $0x05, AX + CMPL R10, $0x04 + JL four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL CX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JLT 
three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + 
MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + 
MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x04 + JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + SUBL $0x04, R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x02 + JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + SUBL $0x02, R8 + LEAL 2(R11), R11 + 
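+	// matchLen tail: with at most one byte left after the 4- and 2-byte
+	// compares above, a single byte compare settles the final match length
+	// in R11 without reading past the end of src.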
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x01 + JL repeat_extend_forward_end_encodeSnappyBlockAsm64K + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm64K: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + 
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBlockAsm64K: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K + JZ 
match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x01 + JL match_nolit_end_encodeSnappyBlockAsm64K + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm64K + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL CX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm64K + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + 
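+	// Remainder literals of up to 64 bytes are copied inline with
+	// size-classed moves (8, 9-16, 17-32 and 33-64 bytes) rather than a
+	// memmove call; longer runs take the aligned 32-byte SSE loop in the
+	// memmove_long variant below.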
+memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + 
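+	// Main search loop of the 12B (16KB table) variant: the bytes at s and
+	// s+1 are multiplicatively hashed into a 4096-entry candidate table,
+	// with a repeat-offset probe run alongside. The skip step grows with
+	// the bytes scanned since the last emit (>>5), so incompressible input
+	// is traversed quickly.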
+search_loop_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm12B + +repeat_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B + +repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +two_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm12B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +one_byte_repeat_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + +memmove_long_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong 
+ MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B + JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x04 + JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + SUBL $0x04, R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x02 + JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + SUBL $0x02, R8 + LEAL 2(R11), R11 + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x01 + JL repeat_extend_forward_end_encodeSnappyBlockAsm12B + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm12B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + CMPL DI, $0x00000800 + JGE 
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm12B + +no_repeat_found_encodeSnappyBlockAsm12B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm12B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm12B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm12B + +candidate3_match_encodeSnappyBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm12B + +candidate2_match_encodeSnappyBlockAsm12B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + +match_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBlockAsm12B + +match_extend_back_end_encodeSnappyBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm12B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm12B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +two_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +one_byte_match_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (DI), 
X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B + +memmove_long_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm12B: +match_nolit_loop_encodeSnappyBlockAsm12B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B + JZ match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x01 + JL match_nolit_end_encodeSnappyBlockAsm12B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm12B + LEAL 1(R10), R10 + 
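+	// Snappy copy emission: matches longer than 64 bytes are drained in
+	// length-60 copy-2 chunks (tag byte 0xee); lengths of 4-11 with an
+	// offset under 2048 use the two-byte copy-1 form, and everything else
+	// takes the three-byte copy-2 encoding.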
+match_nolit_end_encodeSnappyBlockAsm12B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm12B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm12B + INCL CX + JMP search_loop_encodeSnappyBlockAsm12B + +emit_remainder_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP 
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ 
repeat_extend_back_end_encodeSnappyBlockAsm10B + +repeat_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B + +repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +two_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm10B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +one_byte_repeat_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + +memmove_long_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, 
-32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B + JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x04 + JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + SUBL $0x04, R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x02 + JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + SUBL $0x02, R8 + LEAL 2(R11), R11 + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x01 + JL repeat_extend_forward_end_encodeSnappyBlockAsm10B + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm10B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + CMPL DI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm10B + +no_repeat_found_encodeSnappyBlockAsm10B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm10B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm10B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm10B + MOVL 20(SP), CX + 
JMP search_loop_encodeSnappyBlockAsm10B + +candidate3_match_encodeSnappyBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm10B + +candidate2_match_encodeSnappyBlockAsm10B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + +match_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBlockAsm10B + +match_extend_back_end_encodeSnappyBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm10B: + MOVL CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm10B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +two_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +one_byte_match_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B + +memmove_long_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA 
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm10B: +match_nolit_loop_encodeSnappyBlockAsm10B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B + JZ match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x01 + JL match_nolit_end_encodeSnappyBlockAsm10B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm10B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm10B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B + 
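+	// The branch above emitted a 2-byte Snappy copy-1 op, which packs a
+	// length of 4-11 and an offset below 2048 as
+	// ((offset>>8)<<5)|((len-4)<<2)|1 plus one low-offset byte. Lengths
+	// above 64 were first drained in 60-byte copy-2 chunks (tag 0xee);
+	// everything else falls through to the 3-byte copy-2 form below.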
+emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm10B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm10B + INCL CX + JMP search_loop_encodeSnappyBlockAsm10B + +emit_remainder_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 
+ MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 24(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + LEAL 1(CX), R10 + MOVL R10, 24(SP)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL CX, R9 + SUBL 16(SP), R9 + MOVL 1(DX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(CX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm8B + +repeat_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL DI, SI + JLE repeat_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(R8*1), BL + MOVB -1(DX)(DI*1), R9 + CMPB BL, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B + +repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_repeat_emit_encodeSnappyBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_repeat_emit_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + 
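+	// Literal headers (Snappy): for n-1 < 60 the length is packed straight
+	// into the tag byte as (n-1)<<2; tag 0xf0 (60<<2) is followed by a
+	// one-byte length, and tag 0xf4 (61<<2) by a two-byte little-endian
+	// length.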
+two_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_repeat_emit_encodeSnappyBlockAsm8B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +one_byte_repeat_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + +memmove_long_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ SI, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: + ADDL $0x05, CX + MOVL CX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B: + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R10, R10 + +#define 
TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B + JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x04 + JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + SUBL $0x04, R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x02 + JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + SUBL $0x02, R8 + LEAL 2(R11), R11 + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x01 + JL repeat_extend_forward_end_encodeSnappyBlockAsm8B + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm8B: + ADDL R11, CX + MOVL CX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL SI, $0x40 + JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(SI), SI + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL SI, $0x0c + JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(SI*4), SI + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(SI*4), SI + MOVB SI, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm8B + +no_repeat_found_encodeSnappyBlockAsm8B: + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm8B + SHRQ $0x08, DI + MOVL 24(SP)(R10*4), SI + LEAL 2(CX), R9 + CMPL (DX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm8B + MOVL R9, 24(SP)(R10*4) + SHRQ $0x08, DI + CMPL (DX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm8B + +candidate3_match_encodeSnappyBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm8B + +candidate2_match_encodeSnappyBlockAsm8B: + MOVL R9, 24(SP)(R10*4) + INCL CX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + +match_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBlockAsm8B + +match_extend_back_end_encodeSnappyBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm8B: + MOVL 
CX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JLT one_byte_match_emit_encodeSnappyBlockAsm8B + CMPL R8, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +two_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB R8, 1(AX) + ADDQ $0x02, AX + CMPL R8, $0x40 + JL memmove_match_emit_encodeSnappyBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +one_byte_match_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (AX) + MOVQ DI, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: + MOVQ R8, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B + +memmove_long_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R8, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL CX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + 
SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(SI*1), SI + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B: + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R9, R9 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x01 + JL match_nolit_end_encodeSnappyBlockAsm8B + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R10, CX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R10*4), R10 + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R10*4), R10 + MOVB R10, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(DX)(CX*1), DI + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(CX), R9 + LEAQ 24(SP)(SI*4), R10 + MOVL (R10), SI + MOVL R9, 24(SP)(R8*4) + MOVL CX, (R10) + CMPL (DX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL CX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL CX, SI + MOVL 
CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 
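+//
+// encodeSnappyBetterBlockAsm trades speed for ratio: every position is
+// entered into two hash tables, a 7-byte hash (prime 0x00cf1bbcdcbfa563)
+// indexing the 64Ki-entry long table at 24(SP) and a 4-byte hash (prime
+// 0x9e3779b1) indexing the 16Ki-entry short table at 262168(SP), so both
+// long and short candidate matches can be probed each iteration.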
+TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JLE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(CX), SI + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(CX)(SI*1), SI + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 5(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE 
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x01 + JL match_nolit_end_encodeSnappyBetterBlockAsm + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL R12, $0x01 + JG match_length_ok_encodeSnappyBetterBlockAsm + CMPL R8, $0x0000ffff + JLE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeSnappyBetterBlockAsm + +match_length_ok_encodeSnappyBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + 
MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JL two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JLE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (AX) + MOVL R8, 1(AX) + LEAL -64(R12), R12 + ADDQ $0x05, AX + CMPL R12, $0x04 + JL four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + MOVB $0x03, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVL R8, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL CX, 8(SP) + JGE 
emit_remainder_encodeSnappyBetterBlockAsm + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JLT three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JLT four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + 
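+	// genMemMoveShort: 9-16 bytes are copied as two possibly overlapping
+	// MOVQs (head and tail), 17-32 bytes as two MOVOUs, and 33-64 bytes as
+	// four MOVOUs, so no per-byte copy loop is needed for these sizes.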
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL 24(SP)(R10*4), SI + MOVL 262168(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 262168(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + 
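+	// As in the other "better" variants, each position probes two hash tables:
+	// SI holds the long-hash candidate (checked above) and R8 the short-hash
+	// candidate, checked below only when the first probe misses. Illustrative
+	// Go, with lTable/sTable and hash7/hash4 as assumed names:
+	//
+	//	cv := load64(src, s)
+	//	candidateL := lTable[hash7(cv)] // 7-byte hash, prefers longer matches
+	//	candidateS := sTable[hash4(cv)] // 4-byte hash, fallback candidate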
CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x01 + JL match_nolit_end_encodeSnappyBetterBlockAsm64K + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + MOVB $0xf4, (AX) + MOVW SI, 
1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP 
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x32, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 262168(SP)(R11*4) + MOVL R15, 262168(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R13 + IMULQ SI, R13 + SHRQ $0x30, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 262168(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + 
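+	// genMemMoveShort dispatches on the literal length into size classes:
+	// <=8 bytes with one 8-byte move, <=16 with an overlapping 8-byte pair,
+	// <=32 with an overlapping 16-byte (SSE) pair, and <=64 with four 16-byte
+	// moves. Source and destination are distinct buffers here, so a plain
+	// forward copy is safe in every class.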
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 
+ IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL 24(SP)(R10*4), SI + MOVL 65560(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 65560(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B + JZ match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x01 + JL match_nolit_end_encodeSnappyBetterBlockAsm12B + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + 
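+	// The literal header emitted next follows the Snappy scheme: for n <= 60
+	// the length is packed into the tag byte itself as (n-1)<<2; longer
+	// literals use tag 0xf0 or 0xf4 plus one or two length bytes. The 12B
+	// variant only handles small blocks, so the 3- and 4-byte length forms
+	// are never needed.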
SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy 
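+	// emitCopy, Snappy form: while more than 64 bytes of match remain, a
+	// maximal tag 0xee copy (length 60, 2-byte offset) is emitted; a short
+	// remainder (length < 12, offset < 2048) uses the 2-byte copy1 encoding,
+	// anything else the 3-byte copy2 encoding. Copy1 sketched in Go
+	// (tagCopy1 = 0x01 assumed):
+	//
+	//	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	//	dst[1] = uint8(offset)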
+two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x34, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 65560(SP)(R11*4) + MOVL R15, 65560(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x32, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 65560(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ BX, $0x10 + JBE 
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL 
$0x05, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL 24(SP)(R10*4), SI + MOVL 16408(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 16408(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + SUBL $0x02, R8 + LEAL 2(R12), R12 + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x01 + JL match_nolit_end_encodeSnappyBetterBlockAsm10B + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE 
match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE 
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL R8, $0x00000800 + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x36, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 16408(SP)(R11*4) + MOVL R15, 16408(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x34, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 16408(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, 
AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), SI + MOVL SI, 
8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(CX)(SI*1), SI + CMPL SI, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(CX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL 24(SP)(R10*4), SI + MOVL 4120(SP)(R11*4), R8 + MOVL CX, 24(SP)(R10*4) + MOVL CX, 4120(SP)(R11*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL (DX)(R8*1), DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R10*4), SI + INCL CX + MOVL CX, 24(SP)(R10*4) + CMPL (DX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL CX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + +match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL CX, DI + JLE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(CX*1), R8 + CMPB BL, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(CX), CX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + SUBL 12(SP), DI + LEAQ 3(AX)(DI*1), DI + CMPQ DI, (SP) + JL match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + ADDL $0x04, CX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(SI*1), R10 + + // matchLen + XORL R12, R12 + CMPL R8, $0x08 + JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + TESTQ R11, R11 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ R11, R11 + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R12), R12 + CMPL R8, $0x08 + JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x04 + JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + SUBL $0x04, R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x02 + JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + SUBL $0x02, R8 + LEAL 2(R12), R12 + 
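+	// matchLen recap: the main loop above XORs 8-byte words and converts the
+	// first differing word to a byte count with TZCNTQ (GOAMD64 v3/v4 builds)
+	// or BSFQ, then 4-, 2- and 1-byte tails finish up. Roughly, in Go:
+	//
+	//	for len(a) >= 8 {
+	//		d := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+	//		if d != 0 {
+	//			return n + bits.TrailingZeros64(d)>>3
+	//		}
+	//		a, b, n = a[8:], b[8:], n+8
+	//	}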
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x01 + JL match_nolit_end_encodeSnappyBetterBlockAsm8B + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (DX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (AX) + MOVQ R10, -8(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ SI, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: 
+ MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ SI, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R12, CX + ADDL $0x04, R12 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x40 + JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R12), R12 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x0c + JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0x01, BL + LEAL -16(BX)(R12*4), R12 + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB $0x02, BL + LEAL -4(BX)(R12*4), R12 + MOVB R12, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL CX, 8(SP) + JGE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ AX, (SP) + JL match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + INCL DI + MOVQ (DX)(DI*1), R9 + MOVQ R9, R10 + MOVQ R9, R11 + MOVQ R9, R12 + SHRQ $0x08, R11 + MOVQ R11, R13 + SHRQ $0x10, R12 + LEAL 1(DI), R14 + LEAL 2(DI), R15 + MOVQ -2(DX)(CX*1), R9 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x20, R12 + IMULQ R8, R12 + SHRQ $0x38, R12 + MOVL DI, 24(SP)(R10*4) + MOVL R14, 24(SP)(R13*4) + MOVL R14, 4120(SP)(R11*4) + MOVL R15, 4120(SP)(R12*4) + MOVQ R9, R10 + MOVQ R9, R11 + SHRQ $0x08, R11 + MOVQ R11, R13 + LEAL -2(CX), R9 + LEAL -1(CX), DI + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R13 + IMULQ SI, R13 + SHRQ $0x36, R13 + MOVL R9, 24(SP)(R10*4) + MOVL DI, 4120(SP)(R11*4) + MOVL DI, 24(SP)(R13*4) + JMP search_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JLT one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JLT two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JL memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, 
DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x08 + JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (CX), SI + MOVQ SI, (AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JLT one_byte_standalone + CMPL SI, $0x00000100 + JLT two_bytes_standalone + CMPL SI, $0x00010000 + JLT three_bytes_standalone + CMPL SI, $0x01000000 + JLT four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + 
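+	// Standalone emitLiteral returns the total bytes written, so BX tracks the
+	// header size alongside AX. Headers follow the Snappy scheme: (n-1)<<2 in
+	// the tag byte for n <= 60, else tag 0xf0/0xf4/0xf8/0xfc followed by 1-4
+	// little-endian length bytes; the four-byte header case follows below.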
+four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 
+ JLE repeat_two_standalone + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JLT repeat_three_standalone + CMPL DX, $0x00010100 + JLT repeat_four_standalone + CMPL DX, $0x0100ffff + JLT repeat_five_standalone + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone + +four_bytes_loop_back_standalone: + CMPL DX, $0x40 + JLE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + JMP four_bytes_loop_back_standalone + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + MOVB $0x03, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + 
MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVW $0x001d, (AX) + MOVW $0xfffb, 2(AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + JMP two_byte_offset_standalone + +two_byte_offset_short_standalone: + CMPL DX, $0x0c + JGE emit_copy_three_standalone + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone + MOVB $0x01, SI + LEAL -16(SI)(DX*4), DX + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + MOVB $0x02, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JLE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + MOVB $0x03, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + CMPL DX, $0x0c + JGE 
emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone_snappy + MOVB $0x01, SI + LEAL -16(SI)(DX*4), DX + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + MOVB $0x02, SI + LEAL -4(SI)(DX*4), DX + MOVB DL, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JL matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef GOAMD64_v4 + TZCNTQ BX, BX + +#define TZCNTQ_EMITTED 1 +#endif + +#ifdef TZCNTQ_EMITTED +#undef TZCNTQ_EMITTED +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JGE matchlen_loopback_standalone + JZ gen_match_len_end + +matchlen_match4_standalone: + CMPL DX, $0x04 + JL matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + SUBL $0x04, DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JL matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + SUBL $0x02, DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JL gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 00000000..fd857682 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,525 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" +) + +const ( + S2IndexHeader = "s2idx\x00" + S2IndexTrailer = "\x00xdi2s" + maxIndexEntries = 1 << 16 +) + +// Index represents an S2/Snappy index. +type Index struct { + TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown. + info []struct { + compressedOffset int64 + uncompressedOffset int64 + } + estBlockUncomp int64 +} + +func (i *Index) reset(maxBlock int) { + i.estBlockUncomp = int64(maxBlock) + i.TotalCompressed = -1 + i.TotalUncompressed = -1 + if len(i.info) > 0 { + i.info = i.info[:0] + } +} + +// allocInfos will allocate an empty slice of infos. +func (i *Index) allocInfos(n int) { + if n > maxIndexEntries { + panic("n > maxIndexEntries") + } + i.info = make([]struct { + compressedOffset int64 + uncompressedOffset int64 + }, 0, n) +} + +// add an uncompressed and compressed pair. +// Entries must be sent in order. 
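+// If the uncompressed offset matches the previous entry, only the previous
+// entry's compressed offset is updated (see the duplicate check below).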
+func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
+	if i == nil {
+		return nil
+	}
+	lastIdx := len(i.info) - 1
+	if lastIdx >= 0 {
+		latest := i.info[lastIdx]
+		if latest.uncompressedOffset == uncompressedOffset {
+			// Uncompressed didn't change, don't add entry,
+			// but update start index.
+			latest.compressedOffset = compressedOffset
+			i.info[lastIdx] = latest
+			return nil
+		}
+		if latest.uncompressedOffset > uncompressedOffset {
+			return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+		}
+		if latest.compressedOffset > compressedOffset {
+			return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset)
+		}
+	}
+	i.info = append(i.info, struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
+	return nil
+}
+
+// Find the offset at or before the wanted (uncompressed) offset.
+// If offset is 0 or positive it is the offset from the beginning of the file.
+// If the uncompressed size is known, the offset must be within the file.
+// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
+// If the offset is negative, it is interpreted as the distance from the end of the file,
+// where -1 represents the last byte.
+// If an offset from the end of the file is requested, but the size is unknown,
+// ErrUnsupported will be returned.
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
+	if i.TotalUncompressed < 0 {
+		return 0, 0, ErrCorrupt
+	}
+	if offset < 0 {
+		offset = i.TotalUncompressed + offset
+		if offset < 0 {
+			return 0, 0, io.ErrUnexpectedEOF
+		}
+	}
+	if offset > i.TotalUncompressed {
+		return 0, 0, io.ErrUnexpectedEOF
+	}
+	for _, info := range i.info {
+		if info.uncompressedOffset > offset {
+			break
+		}
+		compressedOff = info.compressedOffset
+		uncompressedOff = info.uncompressedOffset
+	}
+	return compressedOff, uncompressedOff, nil
+}
+
+// reduce to stay below maxIndexEntries
+func (i *Index) reduce() {
+	if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 {
+		return
+	}
+
+	// Algorithm, keep 1, remove removeN entries...
+	removeN := (len(i.info) + 1) / maxIndexEntries
+	src := i.info
+	j := 0
+
+	// Each block should be at least 1MB, but don't reduce below 1000 entries.
+	for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 {
+		removeN++
+	}
+	for idx := 0; idx < len(src); idx++ {
+		i.info[j] = src[idx]
+		j++
+		idx += removeN
+	}
+	i.info = i.info[:j]
+	// Update maxblock estimate.
+	i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
+}
+
+func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
+	i.reduce()
+	var tmp [binary.MaxVarintLen64]byte
+
+	initSize := len(b)
+	// We make the start a skippable header+size.
+	b = append(b, ChunkTypeIndex, 0, 0, 0)
+	b = append(b, []byte(S2IndexHeader)...)
+	// Total Uncompressed size
+	n := binary.PutVarint(tmp[:], uncompTotal)
+	b = append(b, tmp[:n]...)
+	// Total Compressed size
+	n = binary.PutVarint(tmp[:], compTotal)
+	b = append(b, tmp[:n]...)
+	// Put EstBlockUncomp size
+	n = binary.PutVarint(tmp[:], i.estBlockUncomp)
+	b = append(b, tmp[:n]...)
+	// Put length
+	n = binary.PutVarint(tmp[:], int64(len(i.info)))
+	b = append(b, tmp[:n]...)
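+	// The index written so far: the ChunkTypeIndex byte plus a 3-byte length
+	// placeholder (patched at the end), the "s2idx\x00" magic, then varints
+	// for the total uncompressed size, total compressed size, block size
+	// estimate and entry count. Below follow a "has uncompressed offsets"
+	// byte, the delta-encoded offsets, a fixed 4-byte total size and the
+	// "\x00xdi2s" trailer.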
+ + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... 
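+	// (chunkLen is the 3-byte little-endian length decoded above, so an index
+	// chunk can never exceed 1<<24-1 bytes, which matches maxChunkSize.)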
+ if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... + b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. + _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. 
+// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. 
+		Offsets []struct {
+			CompressedOffset   int64 `json:"compressed"`
+			UncompressedOffset int64 `json:"uncompressed"`
+		} `json:"offsets"`
+		EstBlockUncomp int64 `json:"est_block_uncompressed"`
+	}{
+		TotalUncompressed: i.TotalUncompressed,
+		TotalCompressed:   i.TotalCompressed,
+		EstBlockUncomp:    i.estBlockUncomp,
+	}
+	for _, v := range i.info {
+		x.Offsets = append(x.Offsets, struct {
+			CompressedOffset   int64 `json:"compressed"`
+			UncompressedOffset int64 `json:"uncompressed"`
+		}{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
+	}
+	b, _ := json.MarshalIndent(x, "", " ")
+	return b
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 00000000..dae3f731
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Like Snappy, S2 is aimed at high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences, see the README at: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as an S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed for a better compression ratio.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression is also performed on multiple CPU cores concurrently,
+// significantly improving throughput.
package s2

Wait - removing stray text.
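+    For example, a copy with offset 291 (0x123) and length 7 encodes as the
+    two bytes 0x2d 0x23: the tag byte is ((0x123>>8)<<5) | ((7-4)<<2) | 1 = 0x2d,
+    and the next byte is 0x123 & 0xff = 0x23.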
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize     = 4
+	chunkHeaderSize  = 4
+	magicChunk       = "\xff\x06\x00\x00" + magicBody
+	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+	magicBodySnappy  = "sNaPpY"
+	magicBody        = "S2sTwO"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock.
+	//
+	// For the framing format (Writer type instead of Encode function),
+	// this is the maximum uncompressed size of a block.
+	maxBlockSize = 4 << 20
+
+	// minBlockSize is the minimum block size setting when creating a writer.
+	minBlockSize = 4 << 10
+
+	skippableFrameHeader = 4
+	maxChunkSize         = 1<<24 - 1 // 16777215
+
+	// Default block size
+	defaultBlockSize = 1 << 20
+
+	// maxSnappyBlockSize is the maximum snappy block size.
+	maxSnappyBlockSize = 1 << 16
+
+	obufHeaderLen = checksumSize + chunkHeaderSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	ChunkTypeIndex            = 0x99
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
+
+// literalExtraSize returns the extra size of encoding n literals.
+// n should be >= 0 and <= math.MaxUint32.
+func literalExtraSize(n int64) int64 {
+	if n == 0 {
+		return 0
+	}
+	switch {
+	case n < 60:
+		return 1
+	case n < 1<<8:
+		return 2
+	case n < 1<<16:
+		return 3
+	case n < 1<<24:
+		return 4
+	default:
+		return 5
+	}
+}
+
+type byter interface {
+	Bytes() []byte
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore
similarity index 83%
rename from vendor/go.uber.org/zap/.gitignore
rename to vendor/github.com/klauspost/cpuid/v2/.gitignore
index da9d9d00..daf913b1 100644
--- a/vendor/go.uber.org/zap/.gitignore
+++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore
@@ -6,7 +6,6 @@
 # Folders
 _obj
 _test
-vendor
 
 # Architecture specific extensions/prefixes
 *.[568vq]
@@ -23,10 +22,3 @@ _testmain.go
 *.exe
 *.test
 *.prof
-*.pprof
-*.out
-*.log
-
-/bin
-cover.out
-cover.html
diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
new file mode 100644
index 00000000..944cc000
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml
@@ -0,0 +1,74 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com + +builds: + - + id: "cpuid" + binary: cpuid + main: ./cmd/cpuid/main.go + env: + - CGO_ENABLED=0 + flags: + - -ldflags=-s -w + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm64 + goarm: + - 7 + +archives: + - + id: cpuid + name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - LICENSE +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/cpuid + maintainer: Klaus Post + description: CPUID Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt new file mode 100644 index 00000000..2ef4714f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
diff --git a/vendor/github.com/wiggin77/cfg/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE
similarity index 95%
rename from vendor/github.com/wiggin77/cfg/LICENSE
rename to vendor/github.com/klauspost/cpuid/v2/LICENSE
index 2b0bf7ef..5cec7ee9 100644
--- a/vendor/github.com/wiggin77/cfg/LICENSE
+++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE
@@ -1,6 +1,6 @@
-MIT License
+The MIT License (MIT)
 
-Copyright (c) 2018 wiggin77
+Copyright (c) 2015 Klaus Post
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
new file mode 100644
index 00000000..bc2f98f0
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -0,0 +1,137 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
+
+You can access the CPU information through the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
+[![Build Status][3]][4]
+
+[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
+[4]: https://travis-ci.org/klauspost/cpuid
+
+## installing
+
+`go get -u github.com/klauspost/cpuid/v2` using modules.
+
+Drop `v2` if you are not using modules.
+
+## example
+
+```Go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	. "github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// Print basic CPU information:
+	fmt.Println("Name:", CPU.BrandName)
+	fmt.Println("PhysicalCores:", CPU.PhysicalCores)
+	fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
+	fmt.Println("LogicalCores:", CPU.LogicalCores)
+	fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
+	fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
+	fmt.Println("Cacheline bytes:", CPU.CacheLine)
+	fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
+	fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
+	fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
+	fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
+	fmt.Println("Frequency", CPU.Hz, "hz")
+
+	// Test if we have these specific features:
+	if CPU.Supports(SSE, SSE2) {
+		fmt.Println("We have Streaming SIMD 2 Extensions")
+	}
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: AMD Ryzen 9 3950X 16-Core Processor
+PhysicalCores: 16
+ThreadsPerCore: 2
+LogicalCores: 32
+Family 23 Model: 113 Vendor ID: AMD
+Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3
+Cacheline bytes: 64
+L1 Data Cache: 32768 bytes
+L1 Instruction Cache: 32768 bytes
+L2 Cache: 524288 bytes
+L3 Cache: 16777216 bytes
+Frequency 0 hz
+We have Streaming SIMD 2 Extensions
+```
+
+# usage
+
+The `cpuid.CPU` variable provides access to CPU features.
+Use `cpuid.CPU.Supports()` to check for CPU features.
+A faster `cpuid.CPU.Has()` is provided, which will usually be inlined by the gc compiler.
+
+Note that for some cpu/os combinations some features will not be detected.
+`amd64` has rather good support and should work reliably on all platforms.
+
+Note that hypervisors may not pass through all CPU features.
+
+## arm64 feature detection
+
+Not all operating systems expose ARM feature flags directly,
+and there is no safe way to detect them on the rest.
+
+Currently `arm64/linux` and `arm64/freebsd` should be quite reliable.
+`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected.
+
+A `DetectARM()` can be used if you are able to control your deployment;
+it will detect CPU features, but may crash if the OS doesn't intercept the calls.
+A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below.
+
+Note that currently only features are detected on ARM;
+no additional information is available.
+
+## flags
+
+It is possible to add flags that affect cpu detection.
+
+For this the `Flags()` command is provided.
+
+This must be called *before* `flag.Parse()`, and `Detect()` must be called after the flags have been parsed.
+
+This means that any detection used in `init()` functions will not contain these flags.
+
+Example:
+
+```Go
+package main
+
+import (
+	"flag"
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	cpuid.Flags()
+	flag.Parse()
+	cpuid.Detect()
+
+	// Test if we have these specific features:
+	if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) {
+		fmt.Println("We have Streaming SIMD 2 Extensions")
+	}
+}
+```
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
new file mode 100644
index 00000000..3d543ce9
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -0,0 +1,1132 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) as well as arm64 are supported.
+//
+// You can access the CPU information through the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import (
+	"flag"
+	"fmt"
+	"math"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf
+// and Processor Programming Reference (PPR)
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+	VendorUnknown Vendor = iota
+	Intel
+	AMD
+	VIA
+	Transmeta
+	NSC
+	KVM  // Kernel-based Virtual Machine
+	MSVM // Microsoft Hyper-V or Windows Virtual PC
+	VMware
+	XenHVM
+	Bhyve
+	Hygon
+	SiS
+	RDC
+
+	Ampere
+	ARM
+	Broadcom
+	Cavium
+	DEC
+	Fujitsu
+	Infineon
+	Motorola
+	NVIDIA
+	AMCC
+	Qualcomm
+	Marvell
+
+	lastVendor
+)
+
+//go:generate stringer -type=FeatureID,Vendor
+
+// FeatureID is the ID of a specific cpu feature.
+type FeatureID int
+
+const (
+	// Keep index -1 as unknown
+	UNKNOWN = -1
+
+	// Add features
+	ADX                FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	AESNI              // Advanced Encryption Standard New Instructions
+	AMD3DNOW           // AMD 3DNOW
+	AMD3DNOWEXT        // AMD 3DNowExt
+	AMXBF16            // Tile computational operations on BFLOAT16 numbers
+	AMXINT8            // Tile computational operations on 8-bit integers
+	AMXTILE            // Tile architecture
+	AVX                // AVX functions
+	AVX2               // AVX2 functions
+	AVX512BF16         // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG       // AVX-512 Bit Algorithms
+	AVX512BW           // AVX-512 Byte and Word Instructions
+	AVX512CD           // AVX-512 Conflict Detection Instructions
+	AVX512DQ           // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER           // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F            // AVX-512 Foundation
+	AVX512FP16         // AVX-512 FP16 Instructions
+	AVX512IFMA         // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF           // AVX-512 Prefetch Instructions
+	AVX512VBMI         // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2        // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL           // AVX-512 Vector Length Extensions
+	AVX512VNNI         // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ    // AVX-512 Vector Population Count Doubleword and Quadword
+	AVXSLOW            // Indicates the CPU performs two 128-bit operations instead of one.
+	BMI1               // Bit Manipulation Instruction Set 1
+	BMI2               // Bit Manipulation Instruction Set 2
+	CETIBT             // Intel CET Indirect Branch Tracking
+	CETSS              // Intel CET Shadow Stack
+	CLDEMOTE           // Cache Line Demote
+	CLMUL              // Carry-less Multiplication
+	CLZERO             // CLZERO instruction supported
+	CMOV               // i686 CMOV
+	CMPXCHG8           // CMPXCHG8 instruction
+	CPBOOST            // Core Performance Boost
+	CX16               // CMPXCHG16B Instruction
+	ENQCMD             // Enqueue Command
+	ERMS               // Enhanced REP MOVSB/STOSB
+	F16C               // Half-precision floating-point conversion
+	FMA3               // Intel FMA 3. Does not imply AVX.
+	FMA4               // Bulldozer FMA4 functions
+	FXSR               // FXSAVE, FXRESTOR instructions, CR4 bit 9
+	FXSROPT            // FXSAVE/FXRSTOR optimizations
+	GFNI               // Galois Field New Instructions
+	HLE                // Hardware Lock Elision
+	HTT                // Hyperthreading (enabled)
+	HWA                // Hardware assert supported. Indicates support for MSRC001_10
+	HYPERVISOR         // This bit has been reserved by Intel & AMD for use by hypervisors
+	IBPB               // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+	IBS                // Instruction Based Sampling (AMD)
+	IBSBRNTRGT         // Instruction Based Sampling Feature (AMD)
+	IBSFETCHSAM        // Instruction Based Sampling Feature (AMD)
+	IBSFFV             // Instruction Based Sampling Feature (AMD)
+	IBSOPCNT           // Instruction Based Sampling Feature (AMD)
+	IBSOPCNTEXT        // Instruction Based Sampling Feature (AMD)
+	IBSOPSAM           // Instruction Based Sampling Feature (AMD)
+	IBSRDWROPCNT       // Instruction Based Sampling Feature (AMD)
+	IBSRIPINVALIDCHK   // Instruction Based Sampling Feature (AMD)
+	INT_WBINVD         // WBINVD/WBNOINVD are interruptible.
+	INVLPGB            // INVLPGB and TLBSYNC instructions supported
+	LAHF               // LAHF/SAHF in long mode
+	LZCNT              // LZCNT instruction
+	MCAOVERFLOW        // MCA overflow recovery support.
+	MCOMMIT            // MCOMMIT instruction supported
+	MMX                // standard MMX
+	MMXEXT             // SSE integer functions or AMD MMX ext
+	MOVBE              // MOVBE instruction (big-endian)
+	MOVDIR64B          // Move 64 Bytes as Direct Store
+	MOVDIRI            // Move Doubleword as Direct Store
+	MPX                // Intel MPX (Memory Protection Extensions)
+	MSRIRC             // Instruction Retired Counter MSR available
+	NX                 // NX (No-Execute) bit
+	OSXSAVE            // XSAVE enabled by OS
+	POPCNT             // POPCNT instruction
+	RDPRU              // RDPRU instruction supported
+	RDRAND             // RDRAND instruction is available
+	RDSEED             // RDSEED instruction is available
+	RDTSCP             // RDTSCP Instruction
+	RTM                // Restricted Transactional Memory
+	RTM_ALWAYS_ABORT   // Indicates that the loaded microcode is forcing RTM abort.
+	SCE                // SYSENTER and SYSEXIT instructions
+	SERIALIZE          // Serialize Instruction Execution
+	SGX                // Software Guard Extensions
+	SGXLC              // Software Guard Extensions Launch Control
+	SHA                // Intel SHA Extensions
+	SSE                // SSE functions
+	SSE2               // P4 SSE functions
+	SSE3               // Prescott SSE3 functions
+	SSE4               // Penryn SSE4.1 functions
+	SSE42              // Nehalem SSE4.2 functions
+	SSE4A              // AMD Barcelona microarchitecture SSE4a instructions
+	SSSE3              // Conroe SSSE3 functions
+	STIBP              // Single Thread Indirect Branch Predictors
+	SUCCOR             // Software uncorrectable error containment and recovery capability.
+	TBM                // AMD Trailing Bit Manipulation
+	TSXLDTRK           // Intel TSX Suspend Load Address Tracking
+	VAES               // Vector AES
+	VMX                // Virtual Machine Extensions
+	VPCLMULQDQ         // Carry-Less Multiplication Quadword
+	WAITPKG            // TPAUSE, UMONITOR, UMWAIT
+	WBNOINVD           // Write Back and Do Not Invalidate Cache
+	X87                // FPU
+	XOP                // Bulldozer XOP functions
+	XSAVE              // XSAVE, XRESTOR, XSETBV, XGETBV
+
+	// ARM features:
+	AESARM   // AES instructions
+	ARMCPUID // Some CPU ID registers readable at user-level
+	ASIMD    // Advanced SIMD
+	ASIMDDP  // SIMD Dot Product
+	ASIMDHP  // Advanced SIMD half-precision floating point
+	ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
+	ATOMICS  // Large System Extensions (LSE)
+	CRC32    // CRC32/CRC32C instructions
+	DCPOP    // Data cache clean to Point of Persistence (DC CVAP)
+	EVTSTRM  // Generic timer
+	FCMA     // Floating point complex number addition and multiplication
+	FP       // Single-precision and double-precision floating point
+	FPHP     // Half-precision floating point
+	GPA      // Generic Pointer Authentication
+	JSCVT    // Javascript-style double->int convert (FJCVTZS)
+	LRCPC    // Weaker release consistency (LDAPR, etc)
+	PMULL    // Polynomial Multiply instructions (PMULL/PMULL2)
+	SHA1     // SHA-1 instructions (SHA1C, etc)
+	SHA2     // SHA-2 instructions (SHA256H, etc)
+	SHA3     // SHA-3 instructions (EOR3, RAX1, XAR, BCAX)
+	SHA512   // SHA512 instructions
+	SM3      // SM3 instructions
+	SM4      // SM4 instructions
+	SVE      // Scalable Vector Extension
+
+	// Keep it last. It automatically defines the size of []flagSet
+	lastID
+
+	firstID FeatureID = UNKNOWN + 1
+)
+
+// CPUInfo contains information about the detected system CPU.
+type CPUInfo struct {
+	BrandName      string  // Brand name reported by the CPU
+	VendorID       Vendor  // Comparable CPU vendor ID
+	VendorString   string  // Raw vendor string.
+	featureSet     flagSet // Features of the CPU
+	PhysicalCores  int     // Number of physical processor cores in your CPU. Will be 0 if undetectable.
+	ThreadsPerCore int     // Number of threads per physical core. Will be 1 if undetectable.
+	LogicalCores   int     // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
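+	// (When all three fields are detected, LogicalCores = PhysicalCores * ThreadsPerCore.)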
+	Family         int     // CPU family number
+	Model          int     // CPU model number
+	CacheLine      int     // Cache line size in bytes. Will be 0 if undetectable.
+	Hz             int64   // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
+	BoostFreq      int64   // Max clock speed, if known, 0 otherwise
+	Cache          struct {
+		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
+		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
+		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
+		L3  int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
+	}
+	SGX       SGXSupport
+	maxFunc   uint32
+	maxExFunc uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+var darwinHasAVX512 = func() bool { return false }
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to your data.
+var CPU CPUInfo
+
+func init() {
+	initCPU()
+	Detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while you are running your program,
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func Detect() {
+	// Set defaults
+	CPU.ThreadsPerCore = 1
+	CPU.Cache.L1I = -1
+	CPU.Cache.L1D = -1
+	CPU.Cache.L2 = -1
+	CPU.Cache.L3 = -1
+	safe := true
+	if detectArmFlag != nil {
+		safe = !*detectArmFlag
+	}
+	addInfo(&CPU, safe)
+	if displayFeats != nil && *displayFeats {
+		fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
+		// Exit with non-zero so tests will print value.
+		os.Exit(1)
+	}
+	if disableFlag != nil {
+		s := strings.Split(*disableFlag, ",")
+		for _, feat := range s {
+			feat := ParseFeature(strings.TrimSpace(feat))
+			if feat != UNKNOWN {
+				CPU.featureSet.unset(feat)
+			}
+		}
+	}
+}
+
+// DetectARM will detect ARM64 features.
+// This is NOT done automatically since it can potentially crash
+// if the OS does not handle the command.
+// If, in the future, this can be done safely, this function may not
+// do anything.
+func DetectARM() {
+	addInfo(&CPU, false)
+}
+
+var detectArmFlag *bool
+var displayFeats *bool
+var disableFlag *string
+
+// Flags will add command line flags that affect CPU detection.
+// This must be called *before* flag.Parse, and
+// Detect must be called after the flags have been parsed.
+// Note that this means that any detection used in init() functions
+// will not contain these flags.
+func Flags() {
+	disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
+	displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
+	detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
+}
+
+// Supports returns whether the CPU supports all of the requested features.
+func (c CPUInfo) Supports(ids ...FeatureID) bool {
+	for _, id := range ids {
+		if !c.featureSet.inSet(id) {
+			return false
+		}
+	}
+	return true
+}
+
+// Has allows for checking a single feature.
+// Should be inlined by the compiler.
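+// For example:
+//
+//	if CPU.Has(AVX2) {
+//		// Use an AVX2 code path.
+//	}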
+func (c CPUInfo) Has(id FeatureID) bool {
+	return c.featureSet.inSet(id)
+}
+
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
+var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+
+// X64Level returns the microarchitecture level detected on the CPU.
+// If features are lacking, or when not running in x64 mode, 0 is returned.
+// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+func (c CPUInfo) X64Level() int {
+	if c.featureSet.hasSet(level4Features) {
+		return 4
+	}
+	if c.featureSet.hasSet(level3Features) {
+		return 3
+	}
+	if c.featureSet.hasSet(level2Features) {
+		return 2
+	}
+	if c.featureSet.hasSet(level1Features) {
+		return 1
+	}
+	return 0
+}
+
+// Disable will disable one or several features.
+func (c *CPUInfo) Disable(ids ...FeatureID) bool {
+	for _, id := range ids {
+		c.featureSet.unset(id)
+	}
+	return true
+}
+
+// Enable will enable one or several features even if they were undetected.
+// This is of course not recommended for obvious reasons.
+func (c *CPUInfo) Enable(ids ...FeatureID) bool {
+	for _, id := range ids {
+		c.featureSet.set(id)
+	}
+	return true
+}
+
+// IsVendor returns true if the detected vendor matches v.
+func (c CPUInfo) IsVendor(v Vendor) bool {
+	return c.VendorID == v
+}
+
+func (c CPUInfo) FeatureSet() []string {
+	s := make([]string, 0)
+	s = append(s, c.featureSet.Strings()...)
+	return s
+}
+
+// RTCounter returns the 64-bit time-stamp counter.
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+	if !c.Supports(RDTSCP) {
+		return 0
+	}
+	a, _, _, d := rdtscpAsm()
+	return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+	if !c.Supports(RDTSCP) {
+		return 0
+	}
+	_, _, ecx, _ := rdtscpAsm()
+	return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+	if c.maxFunc < 1 {
+		return -1
+	}
+	_, ebx, _, _ := cpuid(1)
+	return int(ebx >> 24)
+}
+
+// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
+// supported, use it, otherwise parse the brand string. Yes, really.
+func (c *CPUInfo) frequencies() {
+	c.Hz, c.BoostFreq = 0, 0
+	mfi := maxFunctionID()
+	if mfi >= 0x15 {
+		eax, ebx, ecx, _ := cpuid(0x15)
+		if eax != 0 && ebx != 0 && ecx != 0 {
+			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
+		}
+	}
+	if mfi >= 0x16 {
+		a, b, _, _ := cpuid(0x16)
+		// Base...
+		if a&0xffff > 0 {
+			c.Hz = int64(a&0xffff) * 1_000_000
+		}
+		// Boost...
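+		// (Leaf 0x16 reports MHz values: EAX[15:0] is the base frequency and
+		// EBX[15:0] the maximum frequency, hence the 1_000_000 multipliers.)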
+		if b&0xffff > 0 {
+			c.BoostFreq = int64(b&0xffff) * 1_000_000
+		}
+	}
+	if c.Hz > 0 {
+		return
+	}
+
+	// computeHz determines the official rated speed of a CPU from its brand
+	// string. This insanity is *actually the official documented way to do
+	// this according to Intel*, prior to leaf 0x15 existing. The official
+	// documentation only shows this working for exactly `x.xx` or `xxxx`
+	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
+	// sizes.
+	model := c.BrandName
+	hz := strings.LastIndex(model, "Hz")
+	if hz < 3 {
+		return
+	}
+	var multiplier int64
+	switch model[hz-1] {
+	case 'M':
+		multiplier = 1000 * 1000
+	case 'G':
+		multiplier = 1000 * 1000 * 1000
+	case 'T':
+		multiplier = 1000 * 1000 * 1000 * 1000
+	}
+	if multiplier == 0 {
+		return
+	}
+	freq := int64(0)
+	divisor := int64(0)
+	decimalShift := int64(1)
+	var i int
+	for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
+		if model[i] >= '0' && model[i] <= '9' {
+			freq += int64(model[i]-'0') * decimalShift
+			decimalShift *= 10
+		} else if model[i] == '.' {
+			if divisor != 0 {
+				return
+			}
+			divisor = decimalShift
+		} else {
+			return
+		}
+	}
+	// we didn't find a space
+	if i < 0 {
+		return
+	}
+	if divisor != 0 {
+		c.Hz = (freq * multiplier) / divisor
+		return
+	}
+	c.Hz = freq * multiplier
+}
+
+// VM will return true if the cpu id indicates we are in
+// a virtual machine.
+func (c CPUInfo) VM() bool {
+	return CPU.featureSet.inSet(HYPERVISOR)
+}
+
+// flags contains detected cpu features and characteristics
+type flags uint64
+
+// log2(bits_in_uint64)
+const flagBitsLog2 = 6
+const flagBits = 1 << flagBitsLog2
+const flagMask = flagBits - 1
+
+// flagSet contains detected cpu features and characteristics in an array of flags
+type flagSet [(lastID + flagMask) / flagBits]flags
+
+func (s flagSet) inSet(feat FeatureID) bool {
+	return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
+}
+
+func (s *flagSet) set(feat FeatureID) {
+	s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
+}
+
+// setIf will set a feature if boolean is true.
+func (s *flagSet) setIf(cond bool, features ...FeatureID) {
+	if cond {
+		for _, offset := range features {
+			s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
+		}
+	}
+}
+
+func (s *flagSet) unset(offset FeatureID) {
+	bit := flags(1 << (offset & flagMask))
+	s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
+}
+
+// or with another flagset.
+func (s *flagSet) or(other flagSet) {
+	for i, v := range other[:] {
+		s[i] |= v
+	}
+}
+
+// hasSet returns whether all features are present.
+func (s flagSet) hasSet(other flagSet) bool {
+	for i, v := range other[:] {
+		if s[i]&v != v {
+			return false
+		}
+	}
+	return true
+}
+
+func flagSetWith(feat ...FeatureID) flagSet {
+	var res flagSet
+	for _, f := range feat {
+		res.set(f)
+	}
+	return res
+}
+
+// ParseFeature will parse the string and return the ID of the matching feature.
+// Will return UNKNOWN if not found.
+func ParseFeature(s string) FeatureID {
+	s = strings.ToUpper(s)
+	for i := firstID; i < lastID; i++ {
+		if i.String() == s {
+			return i
+		}
+	}
+	return UNKNOWN
+}
+
+// Strings returns an array of the detected features for the flagSet.
+func (s flagSet) Strings() []string { + if len(s) == 0 { + return []string{""} + } + r := make([]string, 0) + for i := firstID; i < lastID; i++ { + if s.inSet(i) { + r = append(r, i.String()) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + vend, _ := vendorID() + + if mfi < 0x4 || (vend != Intel && vend != AMD) { + return 1 + } + + if mfi < 0xb { + if vend != Intel { + return 1 + } + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + if vend == AMD { + // Workaround for AMD returning 0, assume 2 if >= Zen 2 + // It will be more correct than not. + fam, _ := familyModel() + _, _, _, d := cpuid(1) + if (d&(1<<28)) != 0 && fam >= 23 { + return 2 + } + } + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + v, _ := vendorID() + switch v { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. 
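+			// (CPUs that support leaf 0xB take the accurate path below the if-block instead.)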
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case AMD, Hygon:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+func physicalCores() int {
+	v, _ := vendorID()
+	switch v {
+	case Intel:
+		return logicalCores() / threadsPerCore()
+	case AMD, Hygon:
+		lc := logicalCores()
+		tpc := threadsPerCore()
+		if lc > 0 && tpc > 0 {
+			return lc / tpc
+		}
+
+		// The following is inaccurate on AMD EPYC 7742 64-Core Processor
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			if c&0xff > 0 {
+				return int(c&0xff) + 1
+			}
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+	"AMDisbetter!": AMD,
+	"AuthenticAMD": AMD,
+	"CentaurHauls": VIA,
+	"GenuineIntel": Intel,
+	"TransmetaCPU": Transmeta,
+	"GenuineTMx86": Transmeta,
+	"Geode by NSC": NSC,
+	"VIA VIA VIA ": VIA,
+	"KVMKVMKVMKVM": KVM,
+	"Microsoft Hv": MSVM,
+	"VMwareVMware": VMware,
+	"XenVMMXenVMM": XenHVM,
+	"bhyve bhyve ": Bhyve,
+	"HygonGenuine": Hygon,
+	"Vortex86 SoC": SiS,
+	"SiS SiS SiS ": SiS,
+	"RiseRiseRise": SiS,
+	"Genuine  RDC": RDC,
+}
+
+func vendorID() (Vendor, string) {
+	_, b, c, d := cpuid(0)
+	v := string(valAsString(b, d, c))
+	vend, ok := vendorMapping[v]
+	if !ok {
+		return VendorUnknown, v
+	}
+	return vend, v
+}
+
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	cache := (ebx & 0xff00) >> 5 // clflush size
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff // cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+func (c *CPUInfo) cacheSize() {
+	c.Cache.L1D = -1
+	c.Cache.L1I = -1
+	c.Cache.L2 = -1
+	c.Cache.L3 = -1
+	vendor, _ := vendorID()
+	switch vendor {
+	case Intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.Cache.L1D = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.Cache.L1I = size
+				} else {
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	case AMD, Hygon:
+		// Untested.
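+		// (Leaves 0x80000005/0x80000006 report sizes in KB, hence the *1024 conversions below.)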
+		if maxExtendedFunction() < 0x80000005 {
+			return
+		}
+		_, _, ecx, edx := cpuid(0x80000005)
+		c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+		c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+		if maxExtendedFunction() < 0x80000006 {
+			return
+		}
+		_, _, ecx, _ = cpuid(0x80000006)
+		c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+
+		// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
+		if maxExtendedFunction() < 0x8000001D {
+			return
+		}
+		for i := uint32(0); i < math.MaxUint32; i++ {
+			eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
+
+			level := (eax >> 5) & 7
+			cacheNumSets := ecx + 1
+			cacheLineSize := 1 + (ebx & 2047)
+			cachePhysPartitions := 1 + ((ebx >> 12) & 511)
+			cacheNumWays := 1 + ((ebx >> 22) & 511)
+
+			typ := eax & 15
+			size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays)
+			if typ == 0 {
+				return
+			}
+
+			switch level {
+			case 1:
+				switch typ {
+				case 1:
+					// Data cache
+					c.Cache.L1D = size
+				case 2:
+					// Inst cache
+					c.Cache.L1I = size
+				default:
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	}
+}
+
+// SGXEPCSection describes a single SGX Enclave Page Cache section.
+type SGXEPCSection struct {
+	BaseAddress uint64
+	EPCSize     uint64
+}
+
+// SGXSupport describes the SGX capabilities detected on the CPU.
+type SGXSupport struct {
+	Available           bool
+	LaunchControl       bool
+	SGX1Supported       bool
+	SGX2Supported       bool
+	MaxEnclaveSizeNot64 int64
+	MaxEnclaveSize64    int64
+	EPCSections         []SGXEPCSection
+}
+
+func hasSGX(available, lc bool) (rval SGXSupport) {
+	rval.Available = available
+
+	if !available {
+		return
+	}
+
+	rval.LaunchControl = lc
+
+	a, _, _, d := cpuidex(0x12, 0)
+	rval.SGX1Supported = a&0x01 != 0
+	rval.SGX2Supported = a&0x02 != 0
+	rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF)     // pow 2
+	rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+	rval.EPCSections = make([]SGXEPCSection, 0)
+
+	for subleaf := uint32(2); subleaf < 2+8; subleaf++ {
+		eax, ebx, ecx, edx := cpuidex(0x12, subleaf)
+		leafType := eax & 0xf
+
+		if leafType == 0 {
+			// Invalid subleaf, stop iterating
+			break
+		} else if leafType == 1 {
+			// EPC Section subleaf
+			baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32)
+			size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32)
+
+			section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size}
+			rval.EPCSections = append(rval.EPCSections, section)
+		}
+	}
+
+	return
+}
+
+func support() flagSet {
+	var fs flagSet
+	mfi := maxFunctionID()
+	vend, _ := vendorID()
+	if mfi < 0x1 {
+		return fs
+	}
+	family, model := familyModel()
+
+	_, _, c, d := cpuid(1)
+	fs.setIf((d&(1<<0)) != 0, X87)
+	fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
+	fs.setIf((d&(1<<11)) != 0, SCE)
+	fs.setIf((d&(1<<15)) != 0, CMOV)
+	fs.setIf((d&(1<<22)) != 0, MMXEXT)
+	fs.setIf((d&(1<<23)) != 0, MMX)
+	fs.setIf((d&(1<<24)) != 0, FXSR)
+	fs.setIf((d&(1<<25)) != 0, FXSROPT)
+	fs.setIf((d&(1<<25)) != 0, SSE)
+	fs.setIf((d&(1<<26)) != 0, SSE2)
+	fs.setIf((c&1) != 0, SSE3)
+	fs.setIf((c&(1<<5)) != 0, VMX)
+	fs.setIf((c&0x00000200) != 0, SSSE3)
+	fs.setIf((c&0x00080000) != 0, SSE4)
+	fs.setIf((c&0x00100000) != 0, SSE42)
+	fs.setIf((c&(1<<25)) != 0, AESNI)
+	fs.setIf((c&(1<<1)) != 0, CLMUL)
+	fs.setIf(c&(1<<22) != 0, MOVBE)
+	fs.setIf(c&(1<<23) != 0, POPCNT)
+	fs.setIf(c&(1<<30) != 0, RDRAND)
+
+	// This bit has been reserved by Intel & AMD for use by hypervisors,
+	// and indicates the presence of a hypervisor.
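+	// (Leaf 1 ECX bit 31; reads as 0 on bare metal.)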
+	fs.setIf(c&(1<<31) != 0, HYPERVISOR)
+	fs.setIf(c&(1<<29) != 0, F16C)
+	fs.setIf(c&(1<<13) != 0, CX16)
+
+	if (vend == Intel || vend == AMD) && (d&(1<<28)) != 0 && mfi >= 4 {
+		fs.setIf(threadsPerCore() > 1, HTT)
+	}
+	fs.setIf(c&1<<26 != 0, XSAVE)
+	fs.setIf(c&1<<27 != 0, OSXSAVE)
+	// Check XGETBV/XSAVE (26), OSXSAVE (27) and AVX (28) bits
+	const avxCheck = 1<<26 | 1<<27 | 1<<28
+	if c&avxCheck == avxCheck {
+		// Check for OS support
+		eax, _ := xgetbv(0)
+		if (eax & 0x6) == 0x6 {
+			fs.set(AVX)
+			switch vend {
+			case Intel:
+				// Older than Haswell.
+				fs.setIf(family == 6 && model < 60, AVXSLOW)
+			case AMD:
+				// Older than Zen 2.
+				fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW)
+			}
+		}
+	}
+	// FMA3 can be used with SSE registers, so no OS support is strictly needed.
+	// Both the FMA3 (12) and OSXSAVE (27) bits are required.
+	const fma3Check = 1<<12 | 1<<27
+	fs.setIf(c&fma3Check == fma3Check, FMA3)
+
+	// Check AVX2. AVX2 requires OS support, but BMI1/2 don't.
+	if mfi >= 7 {
+		_, ebx, ecx, edx := cpuidex(7, 0)
+		eax1, _, _, _ := cpuidex(7, 1)
+		if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
+			fs.set(AVX2)
+		}
+		// CPUID.(EAX=7, ECX=0).EBX
+		if (ebx & 0x00000008) != 0 {
+			fs.set(BMI1)
+			fs.setIf((ebx&0x00000100) != 0, BMI2)
+		}
+		fs.setIf(ebx&(1<<2) != 0, SGX)
+		fs.setIf(ebx&(1<<4) != 0, HLE)
+		fs.setIf(ebx&(1<<9) != 0, ERMS)
+		fs.setIf(ebx&(1<<11) != 0, RTM)
+		fs.setIf(ebx&(1<<14) != 0, MPX)
+		fs.setIf(ebx&(1<<18) != 0, RDSEED)
+		fs.setIf(ebx&(1<<19) != 0, ADX)
+		fs.setIf(ebx&(1<<29) != 0, SHA)
+		// CPUID.(EAX=7, ECX=0).ECX
+		fs.setIf(ecx&(1<<5) != 0, WAITPKG)
+		fs.setIf(ecx&(1<<7) != 0, CETSS)
+		fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
+		fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
+		fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
+		fs.setIf(ecx&(1<<29) != 0, ENQCMD)
+		fs.setIf(ecx&(1<<30) != 0, SGXLC)
+		// CPUID.(EAX=7, ECX=0).EDX
+		fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
+		fs.setIf(edx&(1<<14) != 0, SERIALIZE)
+		fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
+		fs.setIf(edx&(1<<20) != 0, CETIBT)
+		fs.setIf(edx&(1<<26) != 0, IBPB)
+		fs.setIf(edx&(1<<27) != 0, STIBP)
+
+		// Only detect AVX-512 features if XGETBV is supported
+		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+			// Check for OS support
+			eax, _ := xgetbv(0)
+
+			// Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+			// ZMM16-ZMM31 state are enabled by OS)
+			// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
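+			// (eax holds the low 32 bits of XCR0 as returned by XGETBV with ECX=0.)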
+ hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 + if runtime.GOOS == "darwin" { + hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() + } + if hasAVX512 { + fs.setIf(ebx&(1<<16) != 0, AVX512F) + fs.setIf(ebx&(1<<17) != 0, AVX512DQ) + fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) + fs.setIf(ebx&(1<<26) != 0, AVX512PF) + fs.setIf(ebx&(1<<27) != 0, AVX512ER) + fs.setIf(ebx&(1<<28) != 0, AVX512CD) + fs.setIf(ebx&(1<<30) != 0, AVX512BW) + fs.setIf(ebx&(1<<31) != 0, AVX512VL) + // ecx + fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) + fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) + fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) + fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) + // edx + fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) + fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) + fs.setIf(edx&(1<<24) != 0, AMXTILE) + fs.setIf(edx&(1<<25) != 0, AMXINT8) + // eax1 = CPUID.(EAX=7, ECX=1).EAX + fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + fs.set(LZCNT) + fs.set(POPCNT) + } + fs.setIf((c&(1<<0)) != 0, LAHF) + fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((d&(1<<31)) != 0, AMD3DNOW) + fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) + fs.setIf((d&(1<<23)) != 0, MMX) + fs.setIf((d&(1<<22)) != 0, MMXEXT) + fs.setIf((c&(1<<6)) != 0, SSE4A) + fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<27) != 0, RDTSCP) + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if fs.inSet(AVX) { + fs.setIf((c&0x00000800) != 0, XOP) + fs.setIf((c&0x00010000) != 0, FMA4) + } + + } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + + if maxExtendedFunction() >= 0x80000008 { + _, b, _, _ := cpuid(0x80000008) + fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) + } + + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { + eax, _, _, _ := cpuid(0x8000001b) + fs.setIf((eax>>0)&1 == 1, IBSFFV) + fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) + fs.setIf((eax>>2)&1 == 1, IBSOPSAM) + fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) + fs.setIf((eax>>4)&1 == 1, IBSOPCNT) + fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) + fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) + fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + } + + return fs +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s new file mode 100644 index 00000000..8587c3a1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s @@ -0,0 +1,47 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. 
See LICENSE file. + +//+build 386,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0 + MOVL $0, eax+0(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s new file mode 100644 index 00000000..bc11f894 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s @@ -0,0 +1,72 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// From https://go-review.googlesource.com/c/sys/+/285572/ +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 + MOVB $0, ret+0(FP) // default to false + +#ifdef GOOS_darwin // return if not darwin +#ifdef GOARCH_amd64 // return if not amd64 +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + MOVW (BX), AX + CMPW AX, $13 // versions < 13 do not support AVX512 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), AX + MOVQ $hasAVX512F, CX + ANDQ CX, AX + JZ no_avx512 + MOVB $1, ret+0(FP) + +no_avx512: +#endif +#endif + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s new file mode 100644 index 00000000..b31d6aec --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -0,0 +1,26 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +//+build arm64,!gccgo,!noasm,!appengine + +// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt + +// func getMidr +TEXT ·getMidr(SB), 7, $0 + WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ + MOVD R0, midr+0(FP) + RET + +// func getProcFeatures +TEXT ·getProcFeatures(SB), 7, $0 + WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ + MOVD R0, procFeatures+0(FP) + RET + +// func getInstAttributes +TEXT ·getInstAttributes(SB), 7, $0 + WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ + WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ + MOVD R0, instAttrReg0+0(FP) + MOVD R1, instAttrReg1+8(FP) + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go new file mode 100644 index 00000000..9a53504a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -0,0 +1,247 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !gccgo && !noasm && !appengine +// +build arm64,!gccgo,!noasm,!appengine + +package cpuid + +import "runtime" + +func getMidr() (midr uint64) +func getProcFeatures() (procFeatures uint64) +func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(c *CPUInfo, safe bool) { + // Seems to be safe to assume on ARM64 + c.CacheLine = 64 + detectOS(c) + + // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
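+	// (MRS reads of ID registers from user space trap unless the kernel
+	// emulates them, which is what the ARMCPUID capability signals.)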
+ if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + return + } + midr := getMidr() + + // MIDR_EL1 - Main ID Register + // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | Implementer | [31-24] | y | + // |--------------------------------------------------| + // | Variant | [23-20] | y | + // |--------------------------------------------------| + // | Architecture | [19-16] | y | + // |--------------------------------------------------| + // | PartNum | [15-4] | y | + // |--------------------------------------------------| + // | Revision | [3-0] | y | + // x--------------------------------------------------x + + switch (midr >> 24) & 0xff { + case 0xC0: + c.VendorString = "Ampere Computing" + c.VendorID = Ampere + case 0x41: + c.VendorString = "Arm Limited" + c.VendorID = ARM + case 0x42: + c.VendorString = "Broadcom Corporation" + c.VendorID = Broadcom + case 0x43: + c.VendorString = "Cavium Inc" + c.VendorID = Cavium + case 0x44: + c.VendorString = "Digital Equipment Corporation" + c.VendorID = DEC + case 0x46: + c.VendorString = "Fujitsu Ltd" + c.VendorID = Fujitsu + case 0x49: + c.VendorString = "Infineon Technologies AG" + c.VendorID = Infineon + case 0x4D: + c.VendorString = "Motorola or Freescale Semiconductor Inc" + c.VendorID = Motorola + case 0x4E: + c.VendorString = "NVIDIA Corporation" + c.VendorID = NVIDIA + case 0x50: + c.VendorString = "Applied Micro Circuits Corporation" + c.VendorID = AMCC + case 0x51: + c.VendorString = "Qualcomm Inc" + c.VendorID = Qualcomm + case 0x56: + c.VendorString = "Marvell International Ltd" + c.VendorID = Marvell + case 0x69: + c.VendorString = "Intel Corporation" + c.VendorID = Intel + } + + // Lower 4 bits: Architecture + // Architecture Meaning + // 0b0001 Armv4. + // 0b0010 Armv4T. + // 0b0011 Armv5 (obsolete). + // 0b0100 Armv5T. + // 0b0101 Armv5TE. + // 0b0110 Armv5TEJ. + // 0b0111 Armv6. + // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. + // Upper 4 bit: Variant + // An IMPLEMENTATION DEFINED variant number. + // Typically, this field is used to distinguish between different product variants, or major revisions of a product. + c.Family = int(midr>>16) & 0xff + + // PartNum, bits [15:4] + // An IMPLEMENTATION DEFINED primary part number for the device. + // On processors implemented by Arm, if the top four bits of the primary + // part number are 0x0 or 0x7, the variant and architecture are encoded differently. + // Revision, bits [3:0] + // An IMPLEMENTATION DEFINED revision number for the device. 
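+	// (Model packs PartNum and Revision together: MIDR_EL1[15:0].)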
+ c.Model = int(midr) & 0xffff + + procFeatures := getProcFeatures() + + // ID_AA64PFR0_EL1 - Processor Feature Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | DIT | [51-48] | y | + // |--------------------------------------------------| + // | SVE | [35-32] | y | + // |--------------------------------------------------| + // | GIC | [27-24] | n | + // |--------------------------------------------------| + // | AdvSIMD | [23-20] | y | + // |--------------------------------------------------| + // | FP | [19-16] | y | + // |--------------------------------------------------| + // | EL3 | [15-12] | n | + // |--------------------------------------------------| + // | EL2 | [11-8] | n | + // |--------------------------------------------------| + // | EL1 | [7-4] | n | + // |--------------------------------------------------| + // | EL0 | [3-0] | n | + // x--------------------------------------------------x + + var f flagSet + // if procFeatures&(0xf<<48) != 0 { + // fmt.Println("DIT") + // } + f.setIf(procFeatures&(0xf<<32) != 0, SVE) + if procFeatures&(0xf<<20) != 15<<20 { + f.set(ASIMD) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 + // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. + f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) + } + f.setIf(procFeatures&(0xf<<16) != 0, FP) + + instAttrReg0, instAttrReg1 := getInstAttributes() + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // + // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | TS | [55-52] | y | + // |--------------------------------------------------| + // | FHM | [51-48] | y | + // |--------------------------------------------------| + // | DP | [47-44] | y | + // |--------------------------------------------------| + // | SM4 | [43-40] | y | + // |--------------------------------------------------| + // | SM3 | [39-36] | y | + // |--------------------------------------------------| + // | SHA3 | [35-32] | y | + // |--------------------------------------------------| + // | RDM | [31-28] | y | + // |--------------------------------------------------| + // | ATOMICS | [23-20] | y | + // |--------------------------------------------------| + // | CRC32 | [19-16] | y | + // |--------------------------------------------------| + // | SHA2 | [15-12] | y | + // |--------------------------------------------------| + // | SHA1 | [11-8] | y | + // |--------------------------------------------------| + // | AES | [7-4] | y | + // x--------------------------------------------------x + + // if instAttrReg0&(0xf<<52) != 0 { + // fmt.Println("TS") + // } + // if instAttrReg0&(0xf<<48) != 0 { + // fmt.Println("FHM") + // } + f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) + f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) + f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) + f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) + f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) + f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) + f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) + f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, 
and SHA512SU1 instructions implemented. + f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) + f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) + f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. + f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 + // + // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | GPI | [31-28] | y | + // |--------------------------------------------------| + // | GPA | [27-24] | y | + // |--------------------------------------------------| + // | LRCPC | [23-20] | y | + // |--------------------------------------------------| + // | FCMA | [19-16] | y | + // |--------------------------------------------------| + // | JSCVT | [15-12] | y | + // |--------------------------------------------------| + // | API | [11-8] | y | + // |--------------------------------------------------| + // | APA | [7-4] | y | + // |--------------------------------------------------| + // | DPB | [3-0] | y | + // x--------------------------------------------------x + + // if instAttrReg1&(0xf<<28) != 0 { + // fmt.Println("GPI") + // } + f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) + f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) + f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) + f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) + // if instAttrReg1&(0xf<<8) != 0 { + // fmt.Println("API") + // } + // if instAttrReg1&(0xf<<4) != 0 { + // fmt.Println("APA") + // } + f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) + + // Store + c.featureSet.or(f) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go new file mode 100644 index 00000000..9636c2bc --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine +// +build !amd64,!386,!arm64 gccgo noasm appengine + +package cpuid + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(info *CPUInfo, safe bool) {} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go new file mode 100644 index 00000000..35678d8a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -0,0 +1,36 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) +// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +func asmDarwinHasAVX512() bool + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm + darwinHasAVX512 = asmDarwinHasAVX512 +} + +func addInfo(c *CPUInfo, safe bool) { + c.maxFunc = maxFunctionID() + c.maxExFunc = maxExtendedFunction() + c.BrandName = brandName() + c.CacheLine = cacheLine() + c.Family, c.Model = familyModel() + c.featureSet = support() + c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.ThreadsPerCore = threadsPerCore() + c.LogicalCores = logicalCores() + c.PhysicalCores = physicalCores() + c.VendorID, c.VendorString = vendorID() + c.cacheSize() + c.frequencies() +} diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go new file mode 100644 index 00000000..02fe232a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -0,0 +1,196 @@ +// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. + +package cpuid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ADX-1] + _ = x[AESNI-2] + _ = x[AMD3DNOW-3] + _ = x[AMD3DNOWEXT-4] + _ = x[AMXBF16-5] + _ = x[AMXINT8-6] + _ = x[AMXTILE-7] + _ = x[AVX-8] + _ = x[AVX2-9] + _ = x[AVX512BF16-10] + _ = x[AVX512BITALG-11] + _ = x[AVX512BW-12] + _ = x[AVX512CD-13] + _ = x[AVX512DQ-14] + _ = x[AVX512ER-15] + _ = x[AVX512F-16] + _ = x[AVX512FP16-17] + _ = x[AVX512IFMA-18] + _ = x[AVX512PF-19] + _ = x[AVX512VBMI-20] + _ = x[AVX512VBMI2-21] + _ = x[AVX512VL-22] + _ = x[AVX512VNNI-23] + _ = x[AVX512VP2INTERSECT-24] + _ = x[AVX512VPOPCNTDQ-25] + _ = x[AVXSLOW-26] + _ = x[BMI1-27] + _ = x[BMI2-28] + _ = x[CETIBT-29] + _ = x[CETSS-30] + _ = x[CLDEMOTE-31] + _ = x[CLMUL-32] + _ = x[CLZERO-33] + _ = x[CMOV-34] + _ = x[CMPXCHG8-35] + _ = x[CPBOOST-36] + _ = x[CX16-37] + _ = x[ENQCMD-38] + _ = x[ERMS-39] + _ = x[F16C-40] + _ = x[FMA3-41] + _ = x[FMA4-42] + _ = x[FXSR-43] + _ = x[FXSROPT-44] + _ = x[GFNI-45] + _ = x[HLE-46] + _ = x[HTT-47] + _ = x[HWA-48] + _ = x[HYPERVISOR-49] + _ = x[IBPB-50] + _ = x[IBS-51] + _ = x[IBSBRNTRGT-52] + _ = x[IBSFETCHSAM-53] + _ = x[IBSFFV-54] + _ = x[IBSOPCNT-55] + _ = x[IBSOPCNTEXT-56] + _ = x[IBSOPSAM-57] + _ = x[IBSRDWROPCNT-58] + _ = x[IBSRIPINVALIDCHK-59] + _ = x[INT_WBINVD-60] + _ = x[INVLPGB-61] + _ = x[LAHF-62] + _ = x[LZCNT-63] + _ = x[MCAOVERFLOW-64] + _ = x[MCOMMIT-65] + _ = x[MMX-66] + _ = x[MMXEXT-67] + _ = x[MOVBE-68] + _ = x[MOVDIR64B-69] + _ = x[MOVDIRI-70] + _ = x[MPX-71] + _ = x[MSRIRC-72] + _ = x[NX-73] + _ = x[OSXSAVE-74] + _ = x[POPCNT-75] + _ = x[RDPRU-76] + _ = x[RDRAND-77] + _ = x[RDSEED-78] + _ = x[RDTSCP-79] + _ = x[RTM-80] + _ = x[RTM_ALWAYS_ABORT-81] + _ = x[SCE-82] + _ = x[SERIALIZE-83] + _ = x[SGX-84] + _ = x[SGXLC-85] + _ = x[SHA-86] + _ = x[SSE-87] + _ = x[SSE2-88] + _ = x[SSE3-89] + _ = x[SSE4-90] + _ = x[SSE42-91] + _ = x[SSE4A-92] + _ = x[SSSE3-93] + _ = x[STIBP-94] + _ = x[SUCCOR-95] + _ = x[TBM-96] + _ = x[TSXLDTRK-97] + _ = x[VAES-98] + _ = 
x[VMX-99] + _ = x[VPCLMULQDQ-100] + _ = x[WAITPKG-101] + _ = x[WBNOINVD-102] + _ = x[X87-103] + _ = x[XOP-104] + _ = x[XSAVE-105] + _ = x[AESARM-106] + _ = x[ARMCPUID-107] + _ = x[ASIMD-108] + _ = x[ASIMDDP-109] + _ = x[ASIMDHP-110] + _ = x[ASIMDRDM-111] + _ = x[ATOMICS-112] + _ = x[CRC32-113] + _ = x[DCPOP-114] + _ = x[EVTSTRM-115] + _ = x[FCMA-116] + _ = x[FP-117] + _ = x[FPHP-118] + _ = x[GPA-119] + _ = x[JSCVT-120] + _ = x[LRCPC-121] + _ = x[PMULL-122] + _ = x[SHA1-123] + _ = x[SHA2-124] + _ = x[SHA3-125] + _ = x[SHA512-126] + _ = x[SM3-127] + _ = x[SM4-128] + _ = x[SVE-129] + _ = x[lastID-130] + _ = x[firstID-0] +} + +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSRIRCNXOSXSAVEPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDX87XOPXSAVEAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" + +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 446, 453, 457, 462, 473, 480, 483, 489, 494, 503, 510, 513, 519, 521, 528, 534, 539, 545, 551, 557, 560, 576, 579, 588, 591, 596, 599, 602, 606, 610, 614, 619, 624, 629, 634, 640, 643, 651, 655, 658, 668, 675, 683, 686, 689, 694, 700, 708, 713, 720, 727, 735, 742, 747, 752, 759, 763, 765, 769, 772, 777, 782, 787, 791, 795, 799, 805, 808, 811, 814, 820} + +func (i FeatureID) String() string { + if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { + return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
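+	// (Each assignment below indexes a one-element array, so the file compiles
+	// only while every Vendor constant still equals its generated value.)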
+ var x [1]struct{} + _ = x[VendorUnknown-0] + _ = x[Intel-1] + _ = x[AMD-2] + _ = x[VIA-3] + _ = x[Transmeta-4] + _ = x[NSC-5] + _ = x[KVM-6] + _ = x[MSVM-7] + _ = x[VMware-8] + _ = x[XenHVM-9] + _ = x[Bhyve-10] + _ = x[Hygon-11] + _ = x[SiS-12] + _ = x[RDC-13] + _ = x[Ampere-14] + _ = x[ARM-15] + _ = x[Broadcom-16] + _ = x[Cavium-17] + _ = x[DEC-18] + _ = x[Fujitsu-19] + _ = x[Infineon-20] + _ = x[Motorola-21] + _ = x[NVIDIA-22] + _ = x[AMCC-23] + _ = x[Qualcomm-24] + _ = x[Marvell-25] + _ = x[lastVendor-26] +} + +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" + +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} + +func (i Vendor) String() string { + if i < 0 || i >= Vendor(len(_Vendor_index)-1) { + return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go new file mode 100644 index 00000000..8d2cb036 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -0,0 +1,19 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + // TODO: Add more if we know them. + c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return true +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go new file mode 100644 index 00000000..ee278b9e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -0,0 +1,130 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file located +// here https://github.com/golang/sys/blob/master/LICENSE + +package cpuid + +import ( + "encoding/binary" + "io/ioutil" + "runtime" +) + +// HWCAP bits. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func detectOS(c *CPUInfo) bool { + // For now assuming no hyperthreading is reasonable. + c.LogicalCores = runtime.NumCPU() + c.PhysicalCores = c.LogicalCores + c.ThreadsPerCore = 1 + if hwcap == 0 { + // We did not get values from the runtime. 
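+		// (hwcap is linknamed to internal/cpu.HWCap; it stays zero under the
+		// nounsafe build tag or when the runtime did not populate it.)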
+ // Try reading /proc/self/auxv + + // From https://github.com/golang/sys + const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + uintSize = int(32 << (^uint(0) >> 63)) + ) + + buf, err := ioutil.ReadFile("/proc/self/auxv") + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return false + } + bo := binary.LittleEndian + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwcap = val + case _AT_HWCAP2: + // Not used + } + } + if hwcap == 0 { + return false + } + } + + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. + c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) + c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) + c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) + c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) + c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) + c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) + c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) + c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) + c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) + c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) + c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) + c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) + c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) + + return true +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go new file mode 100644 index 00000000..8733ba34 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !linux && !darwin +// +build arm64,!linux,!darwin + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... 
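+	// (No portable topology query is available here, so logical == physical is assumed.)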
+ c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return false +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go new file mode 100644 index 00000000..f8f201b5 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go @@ -0,0 +1,8 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build nounsafe +// +build nounsafe + +package cpuid + +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go new file mode 100644 index 00000000..92af622e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build !nounsafe +// +build !nounsafe + +package cpuid + +import _ "unsafe" // needed for go:linkname + +//go:linkname hwcap internal/cpu.HWCap +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh new file mode 100644 index 00000000..471d986d --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null . + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . +done diff --git a/vendor/github.com/mattermost/logr/config.go b/vendor/github.com/mattermost/logr/config.go deleted file mode 100644 index 83d4b0c1..00000000 --- a/vendor/github.com/mattermost/logr/config.go +++ /dev/null @@ -1,11 +0,0 @@ -package logr - -import ( - "fmt" - - "github.com/wiggin77/cfg" -) - -func ConfigLogger(config *cfg.Config) error { - return fmt.Errorf("Not implemented yet") -} diff --git a/vendor/github.com/mattermost/logr/filter.go b/vendor/github.com/mattermost/logr/filter.go deleted file mode 100644 index 6e654cd7..00000000 --- a/vendor/github.com/mattermost/logr/filter.go +++ /dev/null @@ -1,26 +0,0 @@ -package logr - -// LevelID is the unique id of each level. -type LevelID uint - -// Level provides a mechanism to enable/disable specific log lines. -type Level struct { - ID LevelID - Name string - Stacktrace bool -} - -// String returns the name of this level. -func (level Level) String() string { - return level.Name -} - -// Filter allows targets to determine which Level(s) are active -// for logging and which Level(s) require a stack trace to be output. -// A default implementation using "panic, fatal..." is provided, and -// a more flexible alternative implementation is also provided that -// allows any number of custom levels. 
-type Filter interface { - IsEnabled(Level) bool - IsStacktraceEnabled(Level) bool -} diff --git a/vendor/github.com/mattermost/logr/format/json.go b/vendor/github.com/mattermost/logr/format/json.go deleted file mode 100644 index 8f56c6cb..00000000 --- a/vendor/github.com/mattermost/logr/format/json.go +++ /dev/null @@ -1,273 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "sync" - "time" - - "github.com/francoispqt/gojay" - "github.com/mattermost/logr" -) - -// ContextField is a name/value pair within the context fields. -type ContextField struct { - Key string - Val interface{} -} - -// JSON formats log records as JSON. -type JSON struct { - // DisableTimestamp disables output of timestamp field. - DisableTimestamp bool - // DisableLevel disables output of level field. - DisableLevel bool - // DisableMsg disables output of msg field. - DisableMsg bool - // DisableContext disables output of all context fields. - DisableContext bool - // DisableStacktrace disables output of stack trace. - DisableStacktrace bool - - // TimestampFormat is an optional format for timestamps. If empty - // then DefTimestampFormat is used. - TimestampFormat string - - // Deprecated: this has no effect. - Indent string - - // EscapeHTML determines if certain characters (e.g. `<`, `>`, `&`) - // are escaped. - EscapeHTML bool - - // KeyTimestamp overrides the timestamp field key name. - KeyTimestamp string - - // KeyLevel overrides the level field key name. - KeyLevel string - - // KeyMsg overrides the msg field key name. - KeyMsg string - - // KeyContextFields when not empty will group all context fields - // under this key. - KeyContextFields string - - // KeyStacktrace overrides the stacktrace field key name. - KeyStacktrace string - - // ContextSorter allows custom sorting for the context fields. - ContextSorter func(fields logr.Fields) []ContextField - - once sync.Once -} - -// Format converts a log record to bytes in JSON format. -func (j *JSON) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { - j.once.Do(j.applyDefaultKeyNames) - - if buf == nil { - buf = &bytes.Buffer{} - } - enc := gojay.BorrowEncoder(buf) - defer func() { - enc.Release() - }() - - sorter := j.ContextSorter - if sorter == nil { - sorter = j.defaultContextSorter - } - - jlr := JSONLogRec{ - LogRec: rec, - JSON: j, - stacktrace: stacktrace, - sorter: sorter, - } - - err := enc.EncodeObject(jlr) - if err != nil { - return nil, err - } - buf.WriteByte('\n') - return buf, nil -} - -func (j *JSON) applyDefaultKeyNames() { - if j.KeyTimestamp == "" { - j.KeyTimestamp = "timestamp" - } - if j.KeyLevel == "" { - j.KeyLevel = "level" - } - if j.KeyMsg == "" { - j.KeyMsg = "msg" - } - if j.KeyStacktrace == "" { - j.KeyStacktrace = "stacktrace" - } -} - -// defaultContextSorter sorts the context fields alphabetically by key. -func (j *JSON) defaultContextSorter(fields logr.Fields) []ContextField { - keys := make([]string, 0, len(fields)) - for k := range fields { - keys = append(keys, k) - } - sort.Strings(keys) - - cf := make([]ContextField, 0, len(keys)) - for _, k := range keys { - cf = append(cf, ContextField{Key: k, Val: fields[k]}) - } - return cf -} - -// JSONLogRec decorates a LogRec adding JSON encoding. -type JSONLogRec struct { - *logr.LogRec - *JSON - stacktrace bool - sorter func(fields logr.Fields) []ContextField -} - -// MarshalJSONObject encodes the LogRec as JSON. 
-func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) { - if !rec.DisableTimestamp { - timestampFmt := rec.TimestampFormat - if timestampFmt == "" { - timestampFmt = logr.DefTimestampFormat - } - time := rec.Time() - enc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt) - } - if !rec.DisableLevel { - enc.AddStringKey(rec.KeyLevel, rec.Level().Name) - } - if !rec.DisableMsg { - enc.AddStringKey(rec.KeyMsg, rec.Msg()) - } - if !rec.DisableContext { - ctxFields := rec.sorter(rec.Fields()) - if rec.KeyContextFields != "" { - enc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields)) - } else { - if len(ctxFields) > 0 { - for _, cf := range ctxFields { - key := rec.prefixCollision(cf.Key) - encodeField(enc, key, cf.Val) - } - } - } - } - if rec.stacktrace && !rec.DisableStacktrace { - frames := rec.StackFrames() - if len(frames) > 0 { - enc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames)) - } - } - -} - -// IsNil returns true if the LogRec pointer is nil. -func (rec JSONLogRec) IsNil() bool { - return rec.LogRec == nil -} - -func (rec JSONLogRec) prefixCollision(key string) string { - switch key { - case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace: - return rec.prefixCollision("_" + key) - } - return key -} - -type stackFrames []runtime.Frame - -// MarshalJSONArray encodes stackFrames slice as JSON. -func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) { - for _, frame := range s { - enc.AddObject(stackFrame(frame)) - } -} - -// IsNil returns true if stackFrames is empty slice. -func (s stackFrames) IsNil() bool { - return len(s) == 0 -} - -type stackFrame runtime.Frame - -// MarshalJSONArray encodes stackFrame as JSON. -func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) { - enc.AddStringKey("Function", f.Function) - enc.AddStringKey("File", f.File) - enc.AddIntKey("Line", f.Line) -} - -func (f stackFrame) IsNil() bool { - return false -} - -type jsonFields []ContextField - -// MarshalJSONObject encodes Fields map to JSON. -func (f jsonFields) MarshalJSONObject(enc *gojay.Encoder) { - for _, ctxField := range f { - encodeField(enc, ctxField.Key, ctxField.Val) - } -} - -// IsNil returns true if map is nil. 
-func (f jsonFields) IsNil() bool { - return f == nil -} - -func encodeField(enc *gojay.Encoder, key string, val interface{}) { - switch vt := val.(type) { - case gojay.MarshalerJSONObject: - enc.AddObjectKey(key, vt) - case gojay.MarshalerJSONArray: - enc.AddArrayKey(key, vt) - case string: - enc.AddStringKey(key, vt) - case error: - enc.AddStringKey(key, vt.Error()) - case bool: - enc.AddBoolKey(key, vt) - case int: - enc.AddIntKey(key, vt) - case int64: - enc.AddInt64Key(key, vt) - case int32: - enc.AddIntKey(key, int(vt)) - case int16: - enc.AddIntKey(key, int(vt)) - case int8: - enc.AddIntKey(key, int(vt)) - case uint64: - enc.AddIntKey(key, int(vt)) - case uint32: - enc.AddIntKey(key, int(vt)) - case uint16: - enc.AddIntKey(key, int(vt)) - case uint8: - enc.AddIntKey(key, int(vt)) - case float64: - enc.AddFloatKey(key, vt) - case float32: - enc.AddFloat32Key(key, vt) - case *gojay.EmbeddedJSON: - enc.AddEmbeddedJSONKey(key, vt) - case time.Time: - enc.AddTimeKey(key, &vt, logr.DefTimestampFormat) - case *time.Time: - enc.AddTimeKey(key, vt, logr.DefTimestampFormat) - default: - s := fmt.Sprintf("%v", vt) - enc.AddStringKey(key, s) - } -} diff --git a/vendor/github.com/mattermost/logr/format/plain.go b/vendor/github.com/mattermost/logr/format/plain.go deleted file mode 100644 index 3fa92b49..00000000 --- a/vendor/github.com/mattermost/logr/format/plain.go +++ /dev/null @@ -1,75 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - - "github.com/mattermost/logr" -) - -// Plain is the simplest formatter, outputting only text with -// no colors. -type Plain struct { - // DisableTimestamp disables output of timestamp field. - DisableTimestamp bool - // DisableLevel disables output of level field. - DisableLevel bool - // DisableMsg disables output of msg field. - DisableMsg bool - // DisableContext disables output of all context fields. - DisableContext bool - // DisableStacktrace disables output of stack trace. - DisableStacktrace bool - - // Delim is an optional delimiter output between each log field. - // Defaults to a single space. - Delim string - - // TimestampFormat is an optional format for timestamps. If empty - // then DefTimestampFormat is used. - TimestampFormat string -} - -// Format converts a log record to bytes. 
-func (p *Plain) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { - delim := p.Delim - if delim == "" { - delim = " " - } - if buf == nil { - buf = &bytes.Buffer{} - } - - timestampFmt := p.TimestampFormat - if timestampFmt == "" { - timestampFmt = logr.DefTimestampFormat - } - - if !p.DisableTimestamp { - var arr [128]byte - tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt) - buf.Write(tbuf) - buf.WriteString(delim) - } - if !p.DisableLevel { - fmt.Fprintf(buf, "%v%s", rec.Level().Name, delim) - } - if !p.DisableMsg { - fmt.Fprint(buf, rec.Msg(), delim) - } - if !p.DisableContext { - ctx := rec.Fields() - if len(ctx) > 0 { - logr.WriteFields(buf, ctx, " ") - } - } - if stacktrace && !p.DisableStacktrace { - frames := rec.StackFrames() - if len(frames) > 0 { - buf.WriteString("\n") - logr.WriteStacktrace(buf, rec.StackFrames()) - } - } - buf.WriteString("\n") - return buf, nil -} diff --git a/vendor/github.com/mattermost/logr/formatter.go b/vendor/github.com/mattermost/logr/formatter.go deleted file mode 100644 index bb8df2d4..00000000 --- a/vendor/github.com/mattermost/logr/formatter.go +++ /dev/null @@ -1,119 +0,0 @@ -package logr - -import ( - "bytes" - "fmt" - "io" - "runtime" - "sort" -) - -// Formatter turns a LogRec into a formatted string. -type Formatter interface { - // Format converts a log record to bytes. If buf is not nil then it will be - // be filled with the formatted results, otherwise a new buffer will be allocated. - Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) -} - -const ( - // DefTimestampFormat is the default time stamp format used by - // Plain formatter and others. - DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00" -) - -// DefaultFormatter is the default formatter, outputting only text with -// no colors and a space delimiter. Use `format.Plain` instead. -type DefaultFormatter struct { -} - -// Format converts a log record to bytes. -func (p *DefaultFormatter) Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { - if buf == nil { - buf = &bytes.Buffer{} - } - delim := " " - timestampFmt := DefTimestampFormat - - fmt.Fprintf(buf, "%s%s", rec.Time().Format(timestampFmt), delim) - fmt.Fprintf(buf, "%v%s", rec.Level(), delim) - fmt.Fprint(buf, rec.Msg(), delim) - - ctx := rec.Fields() - if len(ctx) > 0 { - WriteFields(buf, ctx, " ") - } - - if stacktrace { - frames := rec.StackFrames() - if len(frames) > 0 { - buf.WriteString("\n") - WriteStacktrace(buf, rec.StackFrames()) - } - } - buf.WriteString("\n") - - return buf, nil -} - -// WriteFields writes zero or more name value pairs to the io.Writer. -// The pairs are sorted by key name and output in key=value format -// with optional separator between fields. 
-func WriteFields(w io.Writer, flds Fields, separator string) { - keys := make([]string, 0, len(flds)) - for k := range flds { - keys = append(keys, k) - } - sort.Strings(keys) - sep := "" - for _, key := range keys { - writeField(w, key, flds[key], sep) - sep = separator - } -} - -func writeField(w io.Writer, key string, val interface{}, sep string) { - var template string - switch v := val.(type) { - case error: - val := v.Error() - if shouldQuote(val) { - template = "%s%s=%q" - } else { - template = "%s%s=%s" - } - case string: - if shouldQuote(v) { - template = "%s%s=%q" - } else { - template = "%s%s=%s" - } - default: - template = "%s%s=%v" - } - fmt.Fprintf(w, template, sep, key, val) -} - -// shouldQuote returns true if val contains any characters that might be unsafe -// when injecting log output into an aggregator, viewer or report. -func shouldQuote(val string) bool { - for _, c := range val { - if !((c >= '0' && c <= '9') || - (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z')) { - return true - } - } - return false -} - -// WriteStacktrace formats and outputs a stack trace to an io.Writer. -func WriteStacktrace(w io.Writer, frames []runtime.Frame) { - for _, frame := range frames { - if frame.Function != "" { - fmt.Fprintf(w, " %s\n", frame.Function) - } - if frame.File != "" { - fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line) - } - } -} diff --git a/vendor/github.com/mattermost/logr/levelcustom.go b/vendor/github.com/mattermost/logr/levelcustom.go deleted file mode 100644 index 384fe4e9..00000000 --- a/vendor/github.com/mattermost/logr/levelcustom.go +++ /dev/null @@ -1,45 +0,0 @@ -package logr - -import ( - "sync" -) - -// CustomFilter allows targets to enable logging via a list of levels. -type CustomFilter struct { - mux sync.RWMutex - levels map[LevelID]Level -} - -// IsEnabled returns true if the specified Level exists in this list. -func (st *CustomFilter) IsEnabled(level Level) bool { - st.mux.RLock() - defer st.mux.RUnlock() - _, ok := st.levels[level.ID] - return ok -} - -// IsStacktraceEnabled returns true if the specified Level requires a stack trace. -func (st *CustomFilter) IsStacktraceEnabled(level Level) bool { - st.mux.RLock() - defer st.mux.RUnlock() - lvl, ok := st.levels[level.ID] - if ok { - return lvl.Stacktrace - } - return false -} - -// Add adds one or more levels to the list. Adding a level enables logging for -// that level on any targets using this CustomFilter. -func (st *CustomFilter) Add(levels ...Level) { - st.mux.Lock() - defer st.mux.Unlock() - - if st.levels == nil { - st.levels = make(map[LevelID]Level) - } - - for _, s := range levels { - st.levels[s.ID] = s - } -} diff --git a/vendor/github.com/mattermost/logr/levelstd.go b/vendor/github.com/mattermost/logr/levelstd.go deleted file mode 100644 index f5e0fa46..00000000 --- a/vendor/github.com/mattermost/logr/levelstd.go +++ /dev/null @@ -1,37 +0,0 @@ -package logr - -// StdFilter allows targets to filter via classic log levels where any level -// beyond a certain verbosity/severity is enabled. -type StdFilter struct { - Lvl Level - Stacktrace Level -} - -// IsEnabled returns true if the specified Level is at or above this verbosity. Also -// determines if a stack trace is required. -func (lt StdFilter) IsEnabled(level Level) bool { - return level.ID <= lt.Lvl.ID -} - -// IsStacktraceEnabled returns true if the specified Level requires a stack trace. 
-func (lt StdFilter) IsStacktraceEnabled(level Level) bool { - return level.ID <= lt.Stacktrace.ID -} - -var ( - // Panic is the highest level of severity. Logs the message and then panics. - Panic = Level{ID: 0, Name: "panic"} - // Fatal designates a catastrophic error. Logs the message and then calls - // `logr.Exit(1)`. - Fatal = Level{ID: 1, Name: "fatal"} - // Error designates a serious but possibly recoverable error. - Error = Level{ID: 2, Name: "error"} - // Warn designates non-critical error. - Warn = Level{ID: 3, Name: "warn"} - // Info designates information regarding application events. - Info = Level{ID: 4, Name: "info"} - // Debug designates verbose information typically used for debugging. - Debug = Level{ID: 5, Name: "debug"} - // Trace designates the highest verbosity of log output. - Trace = Level{ID: 6, Name: "trace"} -) diff --git a/vendor/github.com/mattermost/logr/logger.go b/vendor/github.com/mattermost/logr/logger.go deleted file mode 100644 index c2386312..00000000 --- a/vendor/github.com/mattermost/logr/logger.go +++ /dev/null @@ -1,218 +0,0 @@ -package logr - -import ( - "fmt" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Logger provides context for logging via fields. -type Logger struct { - logr *Logr - fields Fields -} - -// Logr returns the `Logr` instance that created this `Logger`. -func (logger Logger) Logr() *Logr { - return logger.logr -} - -// WithField creates a new `Logger` with any existing fields -// plus the new one. -func (logger Logger) WithField(key string, value interface{}) Logger { - return logger.WithFields(Fields{key: value}) -} - -// WithFields creates a new `Logger` with any existing fields -// plus the new ones. -func (logger Logger) WithFields(fields Fields) Logger { - l := Logger{logr: logger.logr} - // if parent has no fields then avoid creating a new map. - oldLen := len(logger.fields) - if oldLen == 0 { - l.fields = fields - return l - } - - l.fields = make(Fields, len(fields)+oldLen) - for k, v := range logger.fields { - l.fields[k] = v - } - for k, v := range fields { - l.fields[k] = v - } - return l -} - -// Log checks that the level matches one or more targets, and -// if so, generates a log record that is added to the Logr queue. -// Arguments are handled in the manner of fmt.Print. -func (logger Logger) Log(lvl Level, args ...interface{}) { - status := logger.logr.IsLevelEnabled(lvl) - if status.Enabled { - rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) - logger.logr.enqueue(rec) - } -} - -// Trace is a convenience method equivalent to `Log(TraceLevel, args...)`. -func (logger Logger) Trace(args ...interface{}) { - logger.Log(Trace, args...) -} - -// Debug is a convenience method equivalent to `Log(DebugLevel, args...)`. -func (logger Logger) Debug(args ...interface{}) { - logger.Log(Debug, args...) -} - -// Print ensures compatibility with std lib logger. -func (logger Logger) Print(args ...interface{}) { - logger.Info(args...) -} - -// Info is a convenience method equivalent to `Log(InfoLevel, args...)`. -func (logger Logger) Info(args ...interface{}) { - logger.Log(Info, args...) -} - -// Warn is a convenience method equivalent to `Log(WarnLevel, args...)`. -func (logger Logger) Warn(args ...interface{}) { - logger.Log(Warn, args...) -} - -// Error is a convenience method equivalent to `Log(ErrorLevel, args...)`. -func (logger Logger) Error(args ...interface{}) { - logger.Log(Error, args...) 
-} - -// Fatal is a convenience method equivalent to `Log(FatalLevel, args...)` -// followed by a call to os.Exit(1). -func (logger Logger) Fatal(args ...interface{}) { - logger.Log(Fatal, args...) - logger.logr.exit(1) -} - -// Panic is a convenience method equivalent to `Log(PanicLevel, args...)` -// followed by a call to panic(). -func (logger Logger) Panic(args ...interface{}) { - logger.Log(Panic, args...) - panic(fmt.Sprint(args...)) -} - -// -// Printf style -// - -// Logf checks that the level matches one or more targets, and -// if so, generates a log record that is added to the main -// queue (channel). Arguments are handled in the manner of fmt.Printf. -func (logger Logger) Logf(lvl Level, format string, args ...interface{}) { - status := logger.logr.IsLevelEnabled(lvl) - if status.Enabled { - rec := NewLogRec(lvl, logger, format, args, status.Stacktrace) - logger.logr.enqueue(rec) - } -} - -// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`. -func (logger Logger) Tracef(format string, args ...interface{}) { - logger.Logf(Trace, format, args...) -} - -// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`. -func (logger Logger) Debugf(format string, args ...interface{}) { - logger.Logf(Debug, format, args...) -} - -// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`. -func (logger Logger) Infof(format string, args ...interface{}) { - logger.Logf(Info, format, args...) -} - -// Printf ensures compatibility with std lib logger. -func (logger Logger) Printf(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`. -func (logger Logger) Warnf(format string, args ...interface{}) { - logger.Logf(Warn, format, args...) -} - -// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`. -func (logger Logger) Errorf(format string, args ...interface{}) { - logger.Logf(Error, format, args...) -} - -// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)` -// followed by a call to os.Exit(1). -func (logger Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(Fatal, format, args...) - logger.logr.exit(1) -} - -// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)` -// followed by a call to panic(). -func (logger Logger) Panicf(format string, args ...interface{}) { - logger.Logf(Panic, format, args...) -} - -// -// Println style -// - -// Logln checks that the level matches one or more targets, and -// if so, generates a log record that is added to the main -// queue (channel). Arguments are handled in the manner of fmt.Println. -func (logger Logger) Logln(lvl Level, args ...interface{}) { - status := logger.logr.IsLevelEnabled(lvl) - if status.Enabled { - rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) - rec.newline = true - logger.logr.enqueue(rec) - } -} - -// Traceln is a convenience method equivalent to `Logln(TraceLevel, args...)`. -func (logger Logger) Traceln(args ...interface{}) { - logger.Logln(Trace, args...) -} - -// Debugln is a convenience method equivalent to `Logln(DebugLevel, args...)`. -func (logger Logger) Debugln(args ...interface{}) { - logger.Logln(Debug, args...) -} - -// Infoln is a convenience method equivalent to `Logln(InfoLevel, args...)`. -func (logger Logger) Infoln(args ...interface{}) { - logger.Logln(Info, args...) -} - -// Println ensures compatibility with std lib logger. 
-func (logger Logger) Println(args ...interface{}) { - logger.Infoln(args...) -} - -// Warnln is a convenience method equivalent to `Logln(WarnLevel, args...)`. -func (logger Logger) Warnln(args ...interface{}) { - logger.Logln(Warn, args...) -} - -// Errorln is a convenience method equivalent to `Logln(ErrorLevel, args...)`. -func (logger Logger) Errorln(args ...interface{}) { - logger.Logln(Error, args...) -} - -// Fatalln is a convenience method equivalent to `Logln(FatalLevel, args...)` -// followed by a call to os.Exit(1). -func (logger Logger) Fatalln(args ...interface{}) { - logger.Logln(Fatal, args...) - logger.logr.exit(1) -} - -// Panicln is a convenience method equivalent to `Logln(PanicLevel, args...)` -// followed by a call to panic(). -func (logger Logger) Panicln(args ...interface{}) { - logger.Logln(Panic, args...) -} diff --git a/vendor/github.com/mattermost/logr/logr.go b/vendor/github.com/mattermost/logr/logr.go deleted file mode 100644 index a293c16b..00000000 --- a/vendor/github.com/mattermost/logr/logr.go +++ /dev/null @@ -1,464 +0,0 @@ -package logr - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "sync" - "time" - - "github.com/wiggin77/cfg" - "github.com/wiggin77/merror" -) - -// Logr maintains a list of log targets and accepts incoming -// log records. -type Logr struct { - tmux sync.RWMutex // target mutex - targets []Target - - mux sync.RWMutex - maxQueueSizeActual int - in chan *LogRec - done chan struct{} - once sync.Once - shutdown bool - lvlCache levelCache - - bufferPool sync.Pool - - // MaxQueueSize is the maximum number of log records that can be queued. - // If exceeded, `OnQueueFull` is called which determines if the log - // record will be dropped or block until add is successful. - // If this is modified, it must be done before `Configure` or - // `AddTarget`. Defaults to DefaultMaxQueueSize. - MaxQueueSize int - - // OnLoggerError, when not nil, is called any time an internal - // logging error occurs. For example, this can happen when a - // target cannot connect to its data sink. - OnLoggerError func(error) - - // OnQueueFull, when not nil, is called on an attempt to add - // a log record to a full Logr queue. - // `MaxQueueSize` can be used to modify the maximum queue size. - // This function should return quickly, with a bool indicating whether - // the log record should be dropped (true) or block until the log record - // is successfully added (false). If nil then blocking (false) is assumed. - OnQueueFull func(rec *LogRec, maxQueueSize int) bool - - // OnTargetQueueFull, when not nil, is called on an attempt to add - // a log record to a full target queue provided the target supports reporting - // this condition. - // This function should return quickly, with a bool indicating whether - // the log record should be dropped (true) or block until the log record - // is successfully added (false). If nil then blocking (false) is assumed. - OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool - - // OnExit, when not nil, is called when a FatalXXX style log API is called. - // When nil, then the default behavior is to cleanly shut down this Logr and - // call `os.Exit(code)`. - OnExit func(code int) - - // OnPanic, when not nil, is called when a PanicXXX style log API is called. - // When nil, then the default behavior is to cleanly shut down this Logr and - // call `panic(err)`. - OnPanic func(err interface{}) - - // EnqueueTimeout is the amount of time a log record can take to be queued. 
- // This only applies to blocking enqueue which happen after `logr.OnQueueFull` - // is called and returns false. - EnqueueTimeout time.Duration - - // ShutdownTimeout is the amount of time `logr.Shutdown` can execute before - // timing out. - ShutdownTimeout time.Duration - - // FlushTimeout is the amount of time `logr.Flush` can execute before - // timing out. - FlushTimeout time.Duration - - // UseSyncMapLevelCache can be set to true before the first target is added - // when high concurrency (e.g. >32 cores) is expected. This may improve - // performance with large numbers of cores - benchmark for your use case. - UseSyncMapLevelCache bool - - // MaxPooledFormatBuffer determines the maximum size of a buffer that can be - // pooled. To reduce allocations, the buffers needed during formatting (etc) - // are pooled. A very large log item will grow a buffer that could stay in - // memory indefinitely. This settings lets you control how big a pooled buffer - // can be - anything larger will be garbage collected after use. - // Defaults to 1MB. - MaxPooledBuffer int - - // DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer. - DisableBufferPool bool -} - -// Configure adds/removes targets via the supplied `Config`. -func (logr *Logr) Configure(config *cfg.Config) error { - // TODO - return fmt.Errorf("not implemented yet") -} - -// AddTarget adds a target to the logger which will receive -// log records for outputting. -func (logr *Logr) AddTarget(target Target) error { - logr.mux.Lock() - defer logr.mux.Unlock() - - if logr.shutdown { - return fmt.Errorf("logr shut down") - } - - logr.tmux.Lock() - defer logr.tmux.Unlock() - logr.targets = append(logr.targets, target) - - logr.once.Do(func() { - logr.maxQueueSizeActual = logr.MaxQueueSize - if logr.maxQueueSizeActual == 0 { - logr.maxQueueSizeActual = DefaultMaxQueueSize - } - if logr.maxQueueSizeActual < 0 { - logr.maxQueueSizeActual = 0 - } - logr.in = make(chan *LogRec, logr.maxQueueSizeActual) - logr.done = make(chan struct{}) - if logr.UseSyncMapLevelCache { - logr.lvlCache = &syncMapLevelCache{} - } else { - logr.lvlCache = &arrayLevelCache{} - } - if logr.MaxPooledBuffer == 0 { - logr.MaxPooledBuffer = DefaultMaxPooledBuffer - } - logr.bufferPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - } - logr.lvlCache.setup() - go logr.start() - }) - logr.resetLevelCache() - return nil -} - -// NewLogger creates a Logger using defaults. A `Logger` is light-weight -// enough to create on-demand, but typically one or more Loggers are -// created and re-used. -func (logr *Logr) NewLogger() Logger { - logger := Logger{logr: logr} - return logger -} - -var levelStatusDisabled = LevelStatus{} - -// IsLevelEnabled returns true if at least one target has the specified -// level enabled. The result is cached so that subsequent checks are fast. -func (logr *Logr) IsLevelEnabled(lvl Level) LevelStatus { - // Check cache. lvlCache may still be nil if no targets added. - if logr.lvlCache == nil { - return levelStatusDisabled - } - status, ok := logr.lvlCache.get(lvl.ID) - if ok { - return status - } - - logr.mux.RLock() - defer logr.mux.RUnlock() - - // Don't accept new log records after shutdown. - if logr.shutdown { - return levelStatusDisabled - } - - status = LevelStatus{} - - // Check each target. 
- logr.tmux.RLock() - defer logr.tmux.RUnlock() - for _, t := range logr.targets { - e, s := t.IsLevelEnabled(lvl) - if e { - status.Enabled = true - if s { - status.Stacktrace = true - break // if both enabled then no sense checking more targets - } - } - } - - // Cache and return the result. - if err := logr.lvlCache.put(lvl.ID, status); err != nil { - logr.ReportError(err) - return LevelStatus{} - } - return status -} - -// ResetLevelCache resets the cached results of `IsLevelEnabled`. This is -// called any time a Target is added or a target's level is changed. -func (logr *Logr) ResetLevelCache() { - // Write lock so that new cache entries cannot be stored while we - // clear the cache. - logr.mux.Lock() - defer logr.mux.Unlock() - logr.resetLevelCache() -} - -// resetLevelCache empties the level cache without locking. -// mux.Lock must be held before calling this function. -func (logr *Logr) resetLevelCache() { - // lvlCache may still be nil if no targets added. - if logr.lvlCache != nil { - logr.lvlCache.clear() - } -} - -// enqueue adds a log record to the logr queue. If the queue is full then -// this function either blocks or the log record is dropped, depending on -// the result of calling `OnQueueFull`. -func (logr *Logr) enqueue(rec *LogRec) { - if logr.in == nil { - logr.ReportError(fmt.Errorf("AddTarget or Configure must be called before enqueue")) - } - - select { - case logr.in <- rec: - default: - if logr.OnQueueFull != nil && logr.OnQueueFull(rec, logr.maxQueueSizeActual) { - return // drop the record - } - select { - case <-time.After(logr.enqueueTimeout()): - logr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec)) - case logr.in <- rec: // block until success or timeout - } - } -} - -// exit is called by one of the FatalXXX style APIS. If `logr.OnExit` is not nil -// then that method is called, otherwise the default behavior is to shut down this -// Logr cleanly then call `os.Exit(code)`. -func (logr *Logr) exit(code int) { - if logr.OnExit != nil { - logr.OnExit(code) - return - } - - if err := logr.Shutdown(); err != nil { - logr.ReportError(err) - } - os.Exit(code) -} - -// panic is called by one of the PanicXXX style APIS. If `logr.OnPanic` is not nil -// then that method is called, otherwise the default behavior is to shut down this -// Logr cleanly then call `panic(err)`. -func (logr *Logr) panic(err interface{}) { - if logr.OnPanic != nil { - logr.OnPanic(err) - return - } - - if err := logr.Shutdown(); err != nil { - logr.ReportError(err) - } - panic(err) -} - -// Flush blocks while flushing the logr queue and all target queues, by -// writing existing log records to valid targets. -// Any attempts to add new log records will block until flush is complete. -// `logr.FlushTimeout` determines how long flush can execute before -// timing out. Use `IsTimeoutError` to determine if the returned error is -// due to a timeout. -func (logr *Logr) Flush() error { - logr.mux.Lock() - defer logr.mux.Unlock() - - ctx, cancel := context.WithTimeout(context.Background(), logr.flushTimeout()) - defer cancel() - - rec := newFlushLogRec(logr.NewLogger()) - logr.enqueue(rec) - - select { - case <-ctx.Done(): - return newTimeoutError("logr queue shutdown timeout") - case <-rec.flush: - } - return nil -} - -// Shutdown cleanly stops the logging engine after making best efforts -// to flush all targets. Call this function right before application -// exit - logr cannot be restarted once shut down. 
-// `logr.ShutdownTimeout` determines how long shutdown can execute before -// timing out. Use `IsTimeoutError` to determine if the returned error is -// due to a timeout. -func (logr *Logr) Shutdown() error { - logr.mux.Lock() - if logr.shutdown { - logr.mux.Unlock() - return errors.New("Shutdown called again after shut down") - } - logr.shutdown = true - logr.resetLevelCache() - logr.mux.Unlock() - - errs := merror.New() - - ctx, cancel := context.WithTimeout(context.Background(), logr.shutdownTimeout()) - defer cancel() - - // close the incoming channel and wait for read loop to exit. - if logr.in != nil { - close(logr.in) - select { - case <-ctx.Done(): - errs.Append(newTimeoutError("logr queue shutdown timeout")) - case <-logr.done: - } - } - - // logr.in channel should now be drained to targets and no more log records - // can be added. - logr.tmux.RLock() - defer logr.tmux.RUnlock() - for _, t := range logr.targets { - err := t.Shutdown(ctx) - if err != nil { - errs.Append(err) - } - } - return errs.ErrorOrNil() -} - -// ReportError is used to notify the host application of any internal logging errors. -// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is -// output to `os.Stderr`. -func (logr *Logr) ReportError(err interface{}) { - if logr.OnLoggerError == nil { - fmt.Fprintln(os.Stderr, err) - return - } - logr.OnLoggerError(fmt.Errorf("%v", err)) -} - -// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection. -func (logr *Logr) BorrowBuffer() *bytes.Buffer { - if logr.DisableBufferPool { - return &bytes.Buffer{} - } - return logr.bufferPool.Get().(*bytes.Buffer) -} - -// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only -// retained if less than MaxPooledBuffer. -func (logr *Logr) ReleaseBuffer(buf *bytes.Buffer) { - if !logr.DisableBufferPool && buf.Cap() < logr.MaxPooledBuffer { - buf.Reset() - logr.bufferPool.Put(buf) - } -} - -// enqueueTimeout returns amount of time a log record can take to be queued. -// This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called -// and returns false. -func (logr *Logr) enqueueTimeout() time.Duration { - if logr.EnqueueTimeout == 0 { - return DefaultEnqueueTimeout - } - return logr.EnqueueTimeout -} - -// shutdownTimeout returns the timeout duration for `logr.Shutdown`. -func (logr *Logr) shutdownTimeout() time.Duration { - if logr.ShutdownTimeout == 0 { - return DefaultShutdownTimeout - } - return logr.ShutdownTimeout -} - -// flushTimeout returns the timeout duration for `logr.Flush`. -func (logr *Logr) flushTimeout() time.Duration { - if logr.FlushTimeout == 0 { - return DefaultFlushTimeout - } - return logr.FlushTimeout -} - -// start selects on incoming log records until done channel signals. -// Incoming log records are fanned out to all log targets. -func (logr *Logr) start() { - defer func() { - if r := recover(); r != nil { - logr.ReportError(r) - go logr.start() - } - }() - - for rec := range logr.in { - if rec.flush != nil { - logr.flush(rec.flush) - } else { - rec.prep() - logr.fanout(rec) - } - } - close(logr.done) -} - -// fanout pushes a LogRec to all targets. 
-func (logr *Logr) fanout(rec *LogRec) { - var target Target - defer func() { - if r := recover(); r != nil { - logr.ReportError(fmt.Errorf("fanout failed for target %s, %v", target, r)) - } - }() - - logr.tmux.RLock() - defer logr.tmux.RUnlock() - for _, target = range logr.targets { - if enabled, _ := target.IsLevelEnabled(rec.Level()); enabled { - target.Log(rec) - } - } -} - -// flush drains the queue and notifies when done. -func (logr *Logr) flush(done chan<- struct{}) { - // first drain the logr queue. -loop: - for { - var rec *LogRec - select { - case rec = <-logr.in: - if rec.flush == nil { - rec.prep() - logr.fanout(rec) - } - default: - break loop - } - } - - logger := logr.NewLogger() - - // drain all the targets; block until finished. - logr.tmux.RLock() - defer logr.tmux.RUnlock() - for _, target := range logr.targets { - rec := newFlushLogRec(logger) - target.Log(rec) - <-rec.flush - } - done <- struct{}{} -} diff --git a/vendor/github.com/mattermost/logr/target.go b/vendor/github.com/mattermost/logr/target.go deleted file mode 100644 index bab71ec2..00000000 --- a/vendor/github.com/mattermost/logr/target.go +++ /dev/null @@ -1,152 +0,0 @@ -package logr - -import ( - "context" - "fmt" - "os" - "time" -) - -// Target represents a destination for log records such as file, -// database, TCP socket, etc. -type Target interface { - // IsLevelEnabled returns true if this target should emit - // logs for the specified level. Also determines if - // a stack trace is required. - IsLevelEnabled(Level) (enabled bool, stacktrace bool) - - // Formatter returns the Formatter associated with this Target. - Formatter() Formatter - - // Log outputs the log record to this target's destination. - Log(rec *LogRec) - - // Shutdown makes best effort to flush target queue and - // frees/closes all resources. - Shutdown(ctx context.Context) error -} - -// RecordWriter can convert a LogRecord to bytes and output to some data sink. -type RecordWriter interface { - Write(rec *LogRec) error -} - -// Basic provides the basic functionality of a Target that can be used -// to more easily compose your own Targets. To use, just embed Basic -// in your target type, implement `RecordWriter`, and call `Start`. -type Basic struct { - target Target - - filter Filter - formatter Formatter - - in chan *LogRec - done chan struct{} - w RecordWriter -} - -// Start initializes this target helper and starts accepting log records for processing. -func (b *Basic) Start(target Target, rw RecordWriter, filter Filter, formatter Formatter, maxQueued int) { - if filter == nil { - filter = &StdFilter{Lvl: Fatal} - } - if formatter == nil { - formatter = &DefaultFormatter{} - } - - b.target = target - b.filter = filter - b.formatter = formatter - b.in = make(chan *LogRec, maxQueued) - b.done = make(chan struct{}, 1) - b.w = rw - go b.start() -} - -// IsLevelEnabled returns true if this target should emit -// logs for the specified level. Also determines if -// a stack trace is required. -func (b *Basic) IsLevelEnabled(lvl Level) (enabled bool, stacktrace bool) { - return b.filter.IsEnabled(lvl), b.filter.IsStacktraceEnabled(lvl) -} - -// Formatter returns the Formatter associated with this Target. -func (b *Basic) Formatter() Formatter { - return b.formatter -} - -// Shutdown stops processing log records after making best -// effort to flush queue. -func (b *Basic) Shutdown(ctx context.Context) error { - // close the incoming channel and wait for read loop to exit. 
- close(b.in) - select { - case <-ctx.Done(): - case <-b.done: - } - - // b.in channel should now be drained. - return nil -} - -// Log outputs the log record to this targets destination. -func (b *Basic) Log(rec *LogRec) { - lgr := rec.Logger().Logr() - select { - case b.in <- rec: - default: - handler := lgr.OnTargetQueueFull - if handler != nil && handler(b.target, rec, cap(b.in)) { - return // drop the record - } - select { - case <-time.After(lgr.enqueueTimeout()): - lgr.ReportError(fmt.Errorf("target enqueue timeout for log rec [%v]", rec)) - case b.in <- rec: // block until success or timeout - } - } -} - -// Start accepts log records via In channel and writes to the -// supplied writer, until Done channel signaled. -func (b *Basic) start() { - defer func() { - if r := recover(); r != nil { - fmt.Fprintln(os.Stderr, "Basic.start -- ", r) - go b.start() - } - }() - - for rec := range b.in { - if rec.flush != nil { - b.flush(rec.flush) - } else { - err := b.w.Write(rec) - if err != nil { - rec.Logger().Logr().ReportError(err) - } - } - } - close(b.done) -} - -// flush drains the queue and notifies when done. -func (b *Basic) flush(done chan<- struct{}) { - for { - var rec *LogRec - var err error - select { - case rec = <-b.in: - // ignore any redundant flush records. - if rec.flush == nil { - err = b.w.Write(rec) - if err != nil { - rec.Logger().Logr().ReportError(err) - } - } - default: - done <- struct{}{} - return - } - } -} diff --git a/vendor/github.com/mattermost/logr/target/syslog.go b/vendor/github.com/mattermost/logr/target/syslog.go deleted file mode 100644 index 2258fd29..00000000 --- a/vendor/github.com/mattermost/logr/target/syslog.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build !windows,!nacl,!plan9 - -package target - -import ( - "context" - "fmt" - "log/syslog" - - "github.com/mattermost/logr" - "github.com/wiggin77/merror" -) - -// Syslog outputs log records to local or remote syslog. -type Syslog struct { - logr.Basic - w *syslog.Writer -} - -// SyslogParams provides parameters for dialing a syslog daemon. -type SyslogParams struct { - Network string - Raddr string - Priority syslog.Priority - Tag string -} - -// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog. -func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { - writer, err := syslog.Dial(params.Network, params.Raddr, params.Priority, params.Tag) - if err != nil { - return nil, err - } - - s := &Syslog{w: writer} - s.Basic.Start(s, s, filter, formatter, maxQueue) - - return s, nil -} - -// Shutdown stops processing log records after making best -// effort to flush queue. -func (s *Syslog) Shutdown(ctx context.Context) error { - errs := merror.New() - - err := s.Basic.Shutdown(ctx) - errs.Append(err) - - err = s.w.Close() - errs.Append(err) - - return errs.ErrorOrNil() -} - -// Write converts the log record to bytes, via the Formatter, -// and outputs to syslog. 
-func (s *Syslog) Write(rec *logr.LogRec) error { - _, stacktrace := s.IsLevelEnabled(rec.Level()) - - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := s.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - txt := buf.String() - - switch rec.Level() { - case logr.Panic, logr.Fatal: - err = s.w.Crit(txt) - case logr.Error: - err = s.w.Err(txt) - case logr.Warn: - err = s.w.Warning(txt) - case logr.Debug, logr.Trace: - err = s.w.Debug(txt) - default: - // logr.Info plus all custom levels. - err = s.w.Info(txt) - } - - if err != nil { - reporter := rec.Logger().Logr().ReportError - reporter(fmt.Errorf("syslog write fail: %w", err)) - // syslog writer will try to reconnect. - } - return err -} - -// String returns a string representation of this target. -func (s *Syslog) String() string { - return "SyslogTarget" -} diff --git a/vendor/github.com/mattermost/logr/target/writer.go b/vendor/github.com/mattermost/logr/target/writer.go deleted file mode 100644 index b12b4760..00000000 --- a/vendor/github.com/mattermost/logr/target/writer.go +++ /dev/null @@ -1,45 +0,0 @@ -package target - -import ( - "io" - "io/ioutil" - - "github.com/mattermost/logr" -) - -// Writer outputs log records to any `io.Writer`. -type Writer struct { - logr.Basic - out io.Writer -} - -// NewWriterTarget creates a target capable of outputting log records to an io.Writer. -func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { - if out == nil { - out = ioutil.Discard - } - w := &Writer{out: out} - w.Basic.Start(w, w, filter, formatter, maxQueue) - return w -} - -// Write converts the log record to bytes, via the Formatter, -// and outputs to the io.Writer. -func (w *Writer) Write(rec *logr.LogRec) error { - _, stacktrace := w.IsLevelEnabled(rec.Level()) - - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := w.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - _, err = w.out.Write(buf.Bytes()) - return err -} - -// String returns a string representation of this target. 
-func (w *Writer) String() string { - return "WriterTarget" -} diff --git a/vendor/github.com/mattermost/logr/.gitignore b/vendor/github.com/mattermost/logr/v2/.gitignore similarity index 96% rename from vendor/github.com/mattermost/logr/.gitignore rename to vendor/github.com/mattermost/logr/v2/.gitignore index c2c0a9e2..bac5e1c1 100644 --- a/vendor/github.com/mattermost/logr/.gitignore +++ b/vendor/github.com/mattermost/logr/v2/.gitignore @@ -34,3 +34,4 @@ logs # test apps test/cmd/testapp1/testapp1 test/cmd/simple/simple +test/cmd/gelf/gelf diff --git a/vendor/github.com/mattermost/logr/.travis.yml b/vendor/github.com/mattermost/logr/v2/.travis.yml similarity index 100% rename from vendor/github.com/mattermost/logr/.travis.yml rename to vendor/github.com/mattermost/logr/v2/.travis.yml diff --git a/vendor/github.com/mattermost/logr/LICENSE b/vendor/github.com/mattermost/logr/v2/LICENSE similarity index 100% rename from vendor/github.com/mattermost/logr/LICENSE rename to vendor/github.com/mattermost/logr/v2/LICENSE diff --git a/vendor/github.com/mattermost/logr/README.md b/vendor/github.com/mattermost/logr/v2/README.md similarity index 83% rename from vendor/github.com/mattermost/logr/README.md rename to vendor/github.com/mattermost/logr/v2/README.md index a25d6de0..9ee0f17c 100644 --- a/vendor/github.com/mattermost/logr/README.md +++ b/vendor/github.com/mattermost/logr/v2/README.md @@ -16,9 +16,9 @@ It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but add | entity | description | | ------ | ----------- | -| Logr | Engine instance typically instantiated once; used to configure logging.
```lgr := &Logr{}```| +| Logr | Engine instance typically instantiated once; used to configure logging.
```lgr,_ := logr.New()```| | Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or created on demand.
```logger := lgr.NewLogger()```
```logger2 := logger.WithField("user", "Sam")```| -| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).| +| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for file, syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).| | Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.
```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```| | Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters or you can also use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
```formatter := &format.Plain{Delim: " \| "}```| @@ -26,15 +26,15 @@ It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but add ```go // Create Logr instance. -lgr := &logr.Logr{} +lgr,_ := logr.New() // Create a filter and formatter. Both can be shared by multiple // targets. filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error} -formatter := &format.Plain{Delim: " | "} +formatter := &formatters.Plain{Delim: " | "} // WriterTarget outputs to any io.Writer -t := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) +t := targets.NewWriterTarget(filter, formatter, os.StdOut, 1000) lgr.AddTarget(t) // One or more Loggers can be created, shared, used concurrently, @@ -56,7 +56,7 @@ Fields allow for contextual logging, meaning information can be added to log sta Fields are added via Loggers: ```go -lgr := &Logr{} +lgr,_ := logr.New() // ... add targets ... logger := lgr.NewLogger().WithFields(logr.Fields{ "user": user, @@ -88,14 +88,14 @@ Logr also supports custom filters (logr.CustomFilter) which allow fine grained i LoginLevel := logr.Level{ID: 100, Name: "login ", Stacktrace: false} LogoutLevel := logr.Level{ID: 101, Name: "logout", Stacktrace: false} - lgr := &logr.Logr{} + lgr,_ := logr.New() // create a custom filter with custom levels. filter := &logr.CustomFilter{} filter.Add(LoginLevel, LogoutLevel) - formatter := &format.Plain{Delim: " | "} - tgr := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) + formatter := &formatters.Plain{Delim: " | "} + tgr := targets.NewWriterTarget(filter, formatter, os.StdOut, 1000) lgr.AddTarget(tgr) logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"}) @@ -113,36 +113,31 @@ You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) vi You can create your own target by implementing the [Target](./target.go) interface. -An easier method is to use the [logr.Basic](./target.go) type target and build your functionality on that. Basic handles all the queuing and other plumbing so you only need to implement two methods. Example target that outputs to `io.Writer`: +Example target that outputs to `io.Writer`: ```go type Writer struct { - logr.Basic out io.Writer } -func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { +func NewWriterTarget(out io.Writer) *Writer { w := &Writer{out: out} - w.Basic.Start(w, w, filter, formatter, maxQueue) return w } +// Called once to initialize target. +func (w *Writer) Init() error { + return nil +} + // Write will always be called by a single goroutine, so no locking needed. -// Just convert a log record to a []byte using the formatter and output the -// bytes to your sink. -func (w *Writer) Write(rec *logr.LogRec) error { - _, stacktrace := w.IsLevelEnabled(rec.Level()) - - // take a buffer from the pool to avoid allocations or just allocate a new one. - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := w.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - _, err = w.out.Write(buf.Bytes()) - return err +func (w *Writer) Write(p []byte, rec *logr.LogRec) (int, error) { + return w.out.Write(buf.Bytes()) +} + +// Called once to cleanup/free resources for target. 
+func (w *Writer) Shutdown() error { + return nil } ``` diff --git a/vendor/github.com/mattermost/logr/v2/buffer.go b/vendor/github.com/mattermost/logr/v2/buffer.go new file mode 100644 index 00000000..42bf5255 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/buffer.go @@ -0,0 +1,28 @@ +package logr + +import ( + "bytes" + "sync" +) + +// Buffer provides a thread-safe buffer useful for logging to memory in unit tests. +type Buffer struct { + buf bytes.Buffer + mux sync.Mutex +} + +func (b *Buffer) Read(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Read(p) +} +func (b *Buffer) Write(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Write(p) +} +func (b *Buffer) String() string { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.String() +} diff --git a/vendor/github.com/mattermost/logr/v2/config/config.go b/vendor/github.com/mattermost/logr/v2/config/config.go new file mode 100644 index 00000000..e01a5514 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/config/config.go @@ -0,0 +1,209 @@ +package config + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/mattermost/logr/v2" + "github.com/mattermost/logr/v2/formatters" + "github.com/mattermost/logr/v2/targets" +) + +type TargetCfg struct { + Type string `json:"type"` // one of "console", "file", "tcp", "syslog", "none". + Options json.RawMessage `json:"options,omitempty"` + Format string `json:"format"` // one of "json", "plain", "gelf" + FormatOptions json.RawMessage `json:"format_options,omitempty"` + Levels []logr.Level `json:"levels"` + MaxQueueSize int `json:"maxqueuesize,omitempty"` +} + +type ConsoleOptions struct { + Out string `json:"out"` // one of "stdout", "stderr" +} + +type TargetFactory func(targetType string, options json.RawMessage) (logr.Target, error) +type FormatterFactory func(format string, options json.RawMessage) (logr.Formatter, error) + +type Factories struct { + TargetFactory TargetFactory // can be nil + FormatterFactory FormatterFactory // can be nil +} + +var removeAll = func(ti logr.TargetInfo) bool { return true } + +// ConfigureTargets replaces the current list of log targets with a new one based on a map +// of name->TargetCfg. The map of TargetCfg's would typically be serialized from a JSON +// source or can be programmatically created. +// +// An optional set of factories can be provided which will be called to create any target +// types or formatters not built-in. +// +// To append log targets to an existing config, use `(*Logr).AddTarget` or +// `(*Logr).AddTargetFromConfig` instead. 
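As a usage sketch for the `ConfigureTargets` function defined just below (an editor's illustration, not part of the vendored code): the map keys are arbitrary target names, and the shape of each entry mirrors `config/sample-config.json` further down in this diff. The `logr.New()` and `lgr.NewLogger()` calls follow the v2 README above.

```go
package main

import (
	"encoding/json"

	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/config"
)

func main() {
	lgr, err := logr.New()
	if err != nil {
		panic(err)
	}

	// One console target, plain format, three discrete levels.
	// MaxQueueSize is omitted, so logr.DefaultMaxQueueSize applies.
	cfg := map[string]config.TargetCfg{
		"console": {
			Type:          "console",
			Options:       json.RawMessage(`{"out": "stdout"}`),
			Format:        "plain",
			FormatOptions: json.RawMessage(`{"delim": " | "}`),
			Levels:        []logr.Level{logr.Info, logr.Warn, logr.Error},
		},
	}

	// Replaces any existing targets; nil factories means only the
	// built-in target and formatter types are available.
	if err := config.ConfigureTargets(lgr, cfg, nil); err != nil {
		panic(err)
	}

	// logger is now ready to use against the targets configured above.
	logger := lgr.NewLogger()
	_ = logger
}
```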
+func ConfigureTargets(lgr *logr.Logr, config map[string]TargetCfg, factories *Factories) error { + if err := lgr.RemoveTargets(context.Background(), removeAll); err != nil { + return fmt.Errorf("error removing existing log targets: %w", err) + } + + if factories == nil { + factories = &Factories{nil, nil} + } + + for name, tcfg := range config { + target, err := newTarget(tcfg.Type, tcfg.Options, factories.TargetFactory) + if err != nil { + return fmt.Errorf("error creating log target %s: %w", name, err) + } + + if target == nil { + continue + } + + formatter, err := newFormatter(tcfg.Format, tcfg.FormatOptions, factories.FormatterFactory) + if err != nil { + return fmt.Errorf("error creating formatter for log target %s: %w", name, err) + } + + filter := newFilter(tcfg.Levels) + qSize := tcfg.MaxQueueSize + if qSize == 0 { + qSize = logr.DefaultMaxQueueSize + } + + if err = lgr.AddTarget(target, name, filter, formatter, qSize); err != nil { + return fmt.Errorf("error adding log target %s: %w", name, err) + } + } + return nil +} + +func newFilter(levels []logr.Level) logr.Filter { + filter := &logr.CustomFilter{} + for _, lvl := range levels { + filter.Add(lvl) + } + return filter +} + +func newTarget(targetType string, options json.RawMessage, factory TargetFactory) (logr.Target, error) { + switch strings.ToLower(targetType) { + case "console": + c := ConsoleOptions{} + if len(options) != 0 { + if err := json.Unmarshal(options, &c); err != nil { + return nil, fmt.Errorf("error decoding console target options: %w", err) + } + } + var w io.Writer + switch c.Out { + case "stderr": + w = os.Stderr + case "stdout", "": + w = os.Stdout + default: + return nil, fmt.Errorf("invalid console target option '%s'", c.Out) + } + return targets.NewWriterTarget(w), nil + case "file": + fo := targets.FileOptions{} + if len(options) == 0 { + return nil, errors.New("missing file target options") + } + if err := json.Unmarshal(options, &fo); err != nil { + return nil, fmt.Errorf("error decoding file target options: %w", err) + } + if err := fo.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid file target options: %w", err) + } + return targets.NewFileTarget(fo), nil + case "tcp": + to := targets.TcpOptions{} + if len(options) == 0 { + return nil, errors.New("missing TCP target options") + } + if err := json.Unmarshal(options, &to); err != nil { + return nil, fmt.Errorf("error decoding TCP target options: %w", err) + } + if err := to.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid TCP target options: %w", err) + } + return targets.NewTcpTarget(&to), nil + case "syslog": + so := targets.SyslogOptions{} + if len(options) == 0 { + return nil, errors.New("missing SysLog target options") + } + if err := json.Unmarshal(options, &so); err != nil { + return nil, fmt.Errorf("error decoding Syslog target options: %w", err) + } + if err := so.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid SysLog target options: %w", err) + } + return targets.NewSyslogTarget(&so) + case "none": + return nil, nil + default: + if factory != nil { + t, err := factory(targetType, options) + if err != nil || t == nil { + return nil, fmt.Errorf("error from target factory: %w", err) + } + return t, nil + } + } + return nil, fmt.Errorf("target type '%s' is unrecogized", targetType) +} + +func newFormatter(format string, options json.RawMessage, factory FormatterFactory) (logr.Formatter, error) { + switch strings.ToLower(format) { + case "json": + j := formatters.JSON{} + if len(options) != 0 { + if err := 
json.Unmarshal(options, &j); err != nil { + return nil, fmt.Errorf("error decoding JSON formatter options: %w", err) + } + if err := j.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid JSON formatter options: %w", err) + } + } + return &j, nil + case "plain": + p := formatters.Plain{} + if len(options) != 0 { + if err := json.Unmarshal(options, &p); err != nil { + return nil, fmt.Errorf("error decoding Plain formatter options: %w", err) + } + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid plain formatter options: %w", err) + } + } + return &p, nil + case "gelf": + g := formatters.Gelf{} + if len(options) != 0 { + if err := json.Unmarshal(options, &g); err != nil { + return nil, fmt.Errorf("error decoding Gelf formatter options: %w", err) + } + if err := g.CheckValid(); err != nil { + return nil, fmt.Errorf("invalid GELF formatter options: %w", err) + } + } + return &g, nil + + default: + if factory != nil { + f, err := factory(format, options) + if err != nil || f == nil { + return nil, fmt.Errorf("error from formatter factory: %w", err) + } + return f, nil + } + } + return nil, fmt.Errorf("format '%s' is unrecogized", format) +} diff --git a/vendor/github.com/mattermost/logr/v2/config/sample-config.json b/vendor/github.com/mattermost/logr/v2/config/sample-config.json new file mode 100644 index 00000000..540bafbb --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/config/sample-config.json @@ -0,0 +1,90 @@ +{ + "sample-console": { + "type": "console", + "options": { + "out": "stdout" + }, + "format": "plain", + "format_options": { + "delim": " | " + }, + "levels": [ + {"id": 5, "name": "debug"}, + {"id": 4, "name": "info"}, + {"id": 3, "name": "warn"}, + {"id": 2, "name": "error", "stacktrace": true}, + {"id": 1, "name": "fatal", "stacktrace": true}, + {"id": 0, "name": "panic", "stacktrace": true} + ], + "maxqueuesize": 1000 + }, + "sample-file": { + "type": "file", + "options": { + "filename": "test.log", + "max_size": 1000000, + "max_age": 1, + "max_backups": 10, + "compress": true + }, + "format": "json", + "format_options": { + }, + "levels": [ + {"id": 5, "name": "debug"}, + {"id": 4, "name": "info"}, + {"id": 3, "name": "warn"}, + {"id": 2, "name": "error", "stacktrace": true}, + {"id": 1, "name": "fatal", "stacktrace": true}, + {"id": 0, "name": "panic", "stacktrace": true} + ], + "maxqueuesize": 1000 + }, + "sample-tcp": { + "type": "tcp", + "options": { + "host": "localhost", + "port": 18066, + "tls": false, + "cert": "", + "insecure": false + }, + "format": "gelf", + "format_options": { + "hostname": "server01" + }, + "levels": [ + {"id": 5, "name": "debug"}, + {"id": 4, "name": "info"}, + {"id": 3, "name": "warn"}, + {"id": 2, "name": "error", "stacktrace": true}, + {"id": 1, "name": "fatal", "stacktrace": true}, + {"id": 0, "name": "panic", "stacktrace": true} + ], + "maxqueuesize": 1000 + }, + "sample-syslog": { + "type": "syslog", + "options": { + "host": "localhost", + "port": 18066, + "tls": false, + "cert": "", + "insecure": false, + "tag": "testapp" + }, + "format": "plain", + "format_options": { + "delim": " " + }, + "levels": [ + {"id": 5, "name": "debug"}, + {"id": 4, "name": "info"}, + {"id": 3, "name": "warn"}, + {"id": 2, "name": "error", "stacktrace": true}, + {"id": 1, "name": "fatal", "stacktrace": true}, + {"id": 0, "name": "panic", "stacktrace": true} + ], + "maxqueuesize": 1000 + } +} diff --git a/vendor/github.com/mattermost/logr/const.go b/vendor/github.com/mattermost/logr/v2/const.go similarity index 98% rename 
from vendor/github.com/mattermost/logr/const.go rename to vendor/github.com/mattermost/logr/v2/const.go index 704d0507..29d92241 100644 --- a/vendor/github.com/mattermost/logr/const.go +++ b/vendor/github.com/mattermost/logr/v2/const.go @@ -13,7 +13,7 @@ const ( // MaxLevelID is the maximum value of a level ID. Some level cache implementations will // allocate a cache of this size. Cannot exceed uint. - MaxLevelID = 256 + MaxLevelID = 65535 // DefaultEnqueueTimeout is the default amount of time a log record can take to be queued. // This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called diff --git a/vendor/github.com/mattermost/logr/v2/field.go b/vendor/github.com/mattermost/logr/v2/field.go new file mode 100644 index 00000000..33342870 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/field.go @@ -0,0 +1,403 @@ +package logr + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "time" +) + +var ( + Comma = []byte{','} + Equals = []byte{'='} + Space = []byte{' '} + Newline = []byte{'\n'} + Quote = []byte{'"'} + Colon = []byte{':'} +) + +// LogCloner is implemented by `Any` types that require a clone to be provided +// to the logger because the original may mutate. +type LogCloner interface { + LogClone() interface{} +} + +// LogWriter is implemented by `Any` types that provide custom formatting for +// log output. A string representation of the type should be written directly to +// the `io.Writer`. +type LogWriter interface { + LogWrite(w io.Writer) error +} + +type FieldType uint8 + +const ( + UnknownType FieldType = iota + StringType + StringerType + StructType + ErrorType + BoolType + TimestampMillisType + TimeType + DurationType + Int64Type + Int32Type + IntType + Uint64Type + Uint32Type + UintType + Float64Type + Float32Type + BinaryType + ArrayType + MapType +) + +type Field struct { + Key string + Type FieldType + Integer int64 + Float float64 + String string + Interface interface{} +} + +func quoteString(w io.Writer, s string, shouldQuote func(s string) bool) error { + b := shouldQuote(s) + if b { + if _, err := w.Write(Quote); err != nil { + return err + } + } + + if _, err := w.Write([]byte(s)); err != nil { + return err + } + + if b { + if _, err := w.Write(Quote); err != nil { + return err + } + } + return nil +} + +// ValueString converts a known type to a string using default formatting. +// This is called lazily by a formatter. +// Formatters can provide custom formatting or types passed via `Any` can implement +// the `LogString` interface to generate output for logging. +// If the optional shouldQuote callback is provided, then it will be called for any +// string output that could potentially need to be quoted. +func (f Field) ValueString(w io.Writer, shouldQuote func(s string) bool) error { + if shouldQuote == nil { + shouldQuote = func(s string) bool { return false } + } + var err error + switch f.Type { + case StringType: + err = quoteString(w, f.String, shouldQuote) + + case StringerType: + s, ok := f.Interface.(fmt.Stringer) + if ok { + err = quoteString(w, s.String(), shouldQuote) + } else if f.Interface == nil { + err = quoteString(w, "", shouldQuote) + } else { + err = fmt.Errorf("invalid fmt.Stringer for key %s", f.Key) + } + + case StructType: + s, ok := f.Interface.(LogWriter) + if ok { + err = s.LogWrite(w) + break + } + // structs that do not implement LogWriter fall back to reflection via Printf. + // TODO: create custom reflection-based encoder. 
+ _, err = fmt.Fprintf(w, "%v", f.Interface) + + case ErrorType: + // TODO: create custom error encoder. + err = quoteString(w, fmt.Sprintf("%v", f.Interface), shouldQuote) + + case BoolType: + var b bool + if f.Integer != 0 { + b = true + } + _, err = io.WriteString(w, strconv.FormatBool(b)) + + case TimestampMillisType: + ts := time.Unix(f.Integer/1000, (f.Integer%1000)*int64(time.Millisecond)) + err = quoteString(w, ts.UTC().Format(TimestampMillisFormat), shouldQuote) + + case TimeType: + t, ok := f.Interface.(time.Time) + if !ok { + err = errors.New("invalid time") + break + } + err = quoteString(w, t.Format(DefTimestampFormat), shouldQuote) + + case DurationType: + _, err = fmt.Fprintf(w, "%s", time.Duration(f.Integer)) + + case Int64Type, Int32Type, IntType: + _, err = io.WriteString(w, strconv.FormatInt(f.Integer, 10)) + + case Uint64Type, Uint32Type, UintType: + _, err = io.WriteString(w, strconv.FormatUint(uint64(f.Integer), 10)) + + case Float64Type, Float32Type: + size := 64 + if f.Type == Float32Type { + size = 32 + } + err = quoteString(w, strconv.FormatFloat(f.Float, 'f', -1, size), shouldQuote) + + case BinaryType: + b, ok := f.Interface.([]byte) + if ok { + _, err = fmt.Fprintf(w, "[%X]", b) + break + } + _, err = fmt.Fprintf(w, "[%v]", f.Interface) + + case ArrayType: + a := reflect.ValueOf(f.Interface) + arr: + for i := 0; i < a.Len(); i++ { + item := a.Index(i) + switch v := item.Interface().(type) { + case LogWriter: + if err = v.LogWrite(w); err != nil { + break arr + } + case fmt.Stringer: + if err = quoteString(w, v.String(), shouldQuote); err != nil { + break arr + } + default: + s := fmt.Sprintf("%v", v) + if err = quoteString(w, s, shouldQuote); err != nil { + break arr + } + } + if _, err = w.Write(Comma); err != nil { + break arr + } + } + + case MapType: + a := reflect.ValueOf(f.Interface) + iter := a.MapRange() + it: + for iter.Next() { + if _, err = io.WriteString(w, iter.Key().String()); err != nil { + break it + } + if _, err = w.Write(Equals); err != nil { + break it + } + val := iter.Value().Interface() + switch v := val.(type) { + case LogWriter: + if err = v.LogWrite(w); err != nil { + break it + } + case fmt.Stringer: + if err = quoteString(w, v.String(), shouldQuote); err != nil { + break it + } + default: + s := fmt.Sprintf("%v", v) + if err = quoteString(w, s, shouldQuote); err != nil { + break it + } + } + if _, err = w.Write(Comma); err != nil { + break it + } + } + + case UnknownType: + _, err = fmt.Fprintf(w, "%v", f.Interface) + + default: + err = fmt.Errorf("invalid type %d", f.Type) + } + return err +} + +func nilField(key string) Field { + return String(key, "") +} + +func fieldForAny(key string, val interface{}) Field { + switch v := val.(type) { + case LogCloner: + if v == nil { + return nilField(key) + } + c := v.LogClone() + return Field{Key: key, Type: StructType, Interface: c} + case *LogCloner: + if v == nil { + return nilField(key) + } + c := (*v).LogClone() + return Field{Key: key, Type: StructType, Interface: c} + case LogWriter: + if v == nil { + return nilField(key) + } + return Field{Key: key, Type: StructType, Interface: v} + case *LogWriter: + if v == nil { + return nilField(key) + } + return Field{Key: key, Type: StructType, Interface: *v} + case bool: + return Bool(key, v) + case *bool: + if v == nil { + return nilField(key) + } + return Bool(key, *v) + case float64: + return Float64(key, v) + case *float64: + if v == nil { + return nilField(key) + } + return Float64(key, *v) + case float32: + return Float32(key, v) + case 
*float32: + if v == nil { + return nilField(key) + } + return Float32(key, *v) + case int: + return Int(key, v) + case *int: + if v == nil { + return nilField(key) + } + return Int(key, *v) + case int64: + return Int64(key, v) + case *int64: + if v == nil { + return nilField(key) + } + return Int64(key, *v) + case int32: + return Int32(key, v) + case *int32: + if v == nil { + return nilField(key) + } + return Int32(key, *v) + case int16: + return Int32(key, int32(v)) + case *int16: + if v == nil { + return nilField(key) + } + return Int32(key, int32(*v)) + case int8: + return Int32(key, int32(v)) + case *int8: + if v == nil { + return nilField(key) + } + return Int32(key, int32(*v)) + case string: + return String(key, v) + case *string: + if v == nil { + return nilField(key) + } + return String(key, *v) + case uint: + return Uint(key, v) + case *uint: + if v == nil { + return nilField(key) + } + return Uint(key, *v) + case uint64: + return Uint64(key, v) + case *uint64: + if v == nil { + return nilField(key) + } + return Uint64(key, *v) + case uint32: + return Uint32(key, v) + case *uint32: + if v == nil { + return nilField(key) + } + return Uint32(key, *v) + case uint16: + return Uint32(key, uint32(v)) + case *uint16: + if v == nil { + return nilField(key) + } + return Uint32(key, uint32(*v)) + case uint8: + return Uint32(key, uint32(v)) + case *uint8: + if v == nil { + return nilField(key) + } + return Uint32(key, uint32(*v)) + case []byte: + if v == nil { + return nilField(key) + } + return Field{Key: key, Type: BinaryType, Interface: v} + case time.Time: + return Time(key, v) + case *time.Time: + if v == nil { + return nilField(key) + } + return Time(key, *v) + case time.Duration: + return Duration(key, v) + case *time.Duration: + if v == nil { + return nilField(key) + } + return Duration(key, *v) + case error: + return NamedErr(key, v) + case fmt.Stringer: + if v == nil { + return nilField(key) + } + return Field{Key: key, Type: StringerType, Interface: v} + case *fmt.Stringer: + if v == nil { + return nilField(key) + } + return Field{Key: key, Type: StringerType, Interface: *v} + default: + return Field{Key: key, Type: UnknownType, Interface: val} + } +} + +// FieldSorter provides sorting of an array of fields by key. +type FieldSorter []Field + +func (fs FieldSorter) Len() int { return len(fs) } +func (fs FieldSorter) Less(i, j int) bool { return fs[i].Key < fs[j].Key } +func (fs FieldSorter) Swap(i, j int) { fs[i], fs[j] = fs[j], fs[i] } diff --git a/vendor/github.com/mattermost/logr/v2/fieldapi.go b/vendor/github.com/mattermost/logr/v2/fieldapi.go new file mode 100644 index 00000000..58b12280 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/fieldapi.go @@ -0,0 +1,110 @@ +package logr + +import ( + "fmt" + "time" +) + +// Any picks the best supported field type based on type of val. +// For best performance when passing a struct (or struct pointer), +// implement `logr.LogWriter` on the struct, otherwise reflection +// will be used to generate a string representation. +func Any(key string, val interface{}) Field { + return fieldForAny(key, val) +} + +// Int64 constructs a field containing a key and Int64 value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: Int64Type, Integer: val} +} + +// Int32 constructs a field containing a key and Int32 value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: Int32Type, Integer: int64(val)} +} + +// Int constructs a field containing a key and Int value. 
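Pausing between constructors (`Int` itself continues directly below), here is a sketch of how these typed fields fit together. It assumes the v2 `Logger` methods accept a message followed by `...logr.Field`, which is how these constructors are consumed; that signature is not shown in this diff. The `User` type is hypothetical; it implements the `LogWriter` interface from `field.go` above, so `Any` stores it as a `StructType` and never falls back to reflection.

```go
package main

import (
	"fmt"
	"io"

	"github.com/mattermost/logr/v2"
)

// User is a hypothetical type implementing logr.LogWriter, giving it
// full control over its own log representation.
type User struct {
	Name string
	ID   int
}

func (u User) LogWrite(w io.Writer) error {
	_, err := fmt.Fprintf(w, "User[%s:%d]", u.Name, u.ID)
	return err
}

func logLogin(logger logr.Logger, u User) {
	// Typed constructors defer all formatting to the target's
	// formatter, which calls Field.ValueString lazily.
	logger.Info("user logged in", // assumed v2 signature: Info(msg string, fields ...Field)
		logr.String("user", u.Name),
		logr.Int("id", u.ID),
		logr.Any("details", u), // matches the LogWriter case in fieldForAny
	)
}
```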
+func Int(key string, val int) Field { + return Field{Key: key, Type: IntType, Integer: int64(val)} +} + +// Uint64 constructs a field containing a key and Uint64 value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field containing a key and Uint32 value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: Uint32Type, Integer: int64(val)} +} + +// Uint constructs a field containing a key and Uint value. +func Uint(key string, val uint) Field { + return Field{Key: key, Type: UintType, Integer: int64(val)} +} + +// Float64 constructs a field containing a key and Float64 value. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: Float64Type, Float: val} +} + +// Float32 constructs a field containing a key and Float32 value. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: Float32Type, Float: float64(val)} +} + +// String constructs a field containing a key and String value. +func String(key string, val string) Field { + return Field{Key: key, Type: StringType, String: val} +} + +// Stringer constructs a field containing a key and a `fmt.Stringer` value. +// The `String` method will be called in lazy fashion. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: StringerType, Interface: val} +} + +// Err constructs a field containing a default key ("error") and error value. +func Err(err error) Field { + return NamedErr("error", err) +} + +// NamedErr constructs a field containing a key and error value. +func NamedErr(key string, err error) Field { + return Field{Key: key, Type: ErrorType, Interface: err} +} + +// Bool constructs a field containing a key and bool value. +func Bool(key string, val bool) Field { + var b int64 + if val { + b = 1 + } + return Field{Key: key, Type: BoolType, Integer: b} +} + +// Time constructs a field containing a key and time.Time value. +func Time(key string, val time.Time) Field { + return Field{Key: key, Type: TimeType, Interface: val} +} + +// Duration constructs a field containing a key and time.Duration value. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: DurationType, Integer: int64(val)} +} + +// Millis constructs a field containing a key and timestamp value. +// The timestamp is expected to be milliseconds since Jan 1, 1970 UTC. +func Millis(key string, val int64) Field { + return Field{Key: key, Type: TimestampMillisType, Integer: val} +} + +// Array constructs a field containing a key and array value. +func Array(key string, val interface{}) Field { + return Field{Key: key, Type: ArrayType, Interface: val} +} + +// Map constructs a field containing a key and map value. +func Map(key string, val interface{}) Field { + return Field{Key: key, Type: MapType, Interface: val} +} diff --git a/vendor/github.com/mattermost/logr/v2/filter.go b/vendor/github.com/mattermost/logr/v2/filter.go new file mode 100644 index 00000000..a52a7cf4 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/filter.go @@ -0,0 +1,10 @@ +package logr + +// Filter allows targets to determine which Level(s) are active +// for logging and which Level(s) require a stack trace to be output. +// A default implementation using "panic, fatal..." is provided, and +// a more flexible alternative implementation is also provided that +// allows any number of custom levels. 
+type Filter interface {
+	GetEnabledLevel(level Level) (Level, bool)
+}
diff --git a/vendor/github.com/mattermost/logr/v2/filtercustom.go b/vendor/github.com/mattermost/logr/v2/filtercustom.go
new file mode 100644
index 00000000..c20f2811
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/filtercustom.go
@@ -0,0 +1,47 @@
+package logr
+
+import (
+	"sync"
+)
+
+// CustomFilter allows targets to enable logging via a list of discrete levels.
+type CustomFilter struct {
+	mux    sync.RWMutex
+	levels map[LevelID]Level
+}
+
+// NewCustomFilter creates a filter supporting discrete log levels.
+func NewCustomFilter(levels ...Level) *CustomFilter {
+	filter := &CustomFilter{}
+	filter.Add(levels...)
+	return filter
+}
+
+// GetEnabledLevel returns the Level with the specified Level.ID and whether the level
+// is enabled for this filter.
+func (cf *CustomFilter) GetEnabledLevel(level Level) (Level, bool) {
+	cf.mux.RLock()
+	defer cf.mux.RUnlock()
+	levelEnabled, ok := cf.levels[level.ID]
+
+	if ok && levelEnabled.Name == "" {
+		levelEnabled.Name = level.Name
+	}
+
+	return levelEnabled, ok
+}
+
+// Add adds one or more levels to the list. Adding a level enables logging for
+// that level on any targets using this CustomFilter.
+func (cf *CustomFilter) Add(levels ...Level) {
+	cf.mux.Lock()
+	defer cf.mux.Unlock()
+
+	if cf.levels == nil {
+		cf.levels = make(map[LevelID]Level)
+	}
+
+	for _, s := range levels {
+		cf.levels[s.ID] = s
+	}
+}
diff --git a/vendor/github.com/mattermost/logr/v2/filterstd.go b/vendor/github.com/mattermost/logr/v2/filterstd.go
new file mode 100644
index 00000000..fe917fe5
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/filterstd.go
@@ -0,0 +1,71 @@
+package logr
+
+// StdFilter allows targets to filter via classic log levels, where any level
+// at or beyond a certain verbosity/severity is enabled.
+type StdFilter struct {
+	Lvl        Level
+	Stacktrace Level
+}
+
+// GetEnabledLevel returns the Level with the specified Level.ID and whether the level
+// is enabled for this filter.
+func (lt StdFilter) GetEnabledLevel(level Level) (Level, bool) {
+	enabled := level.ID <= lt.Lvl.ID
+	stackTrace := level.ID <= lt.Stacktrace.ID
+	var levelEnabled Level
+
+	if enabled {
+		switch level.ID {
+		case Panic.ID:
+			levelEnabled = Panic
+		case Fatal.ID:
+			levelEnabled = Fatal
+		case Error.ID:
+			levelEnabled = Error
+		case Warn.ID:
+			levelEnabled = Warn
+		case Info.ID:
+			levelEnabled = Info
+		case Debug.ID:
+			levelEnabled = Debug
+		case Trace.ID:
+			levelEnabled = Trace
+		default:
+			levelEnabled = level
+		}
+	}
+
+	if stackTrace {
+		levelEnabled.Stacktrace = true
+	}
+
+	return levelEnabled, enabled
+}
+
+// IsEnabled returns true if the specified Level is at or above this filter's
+// verbosity. Use IsStacktraceEnabled to check if a stack trace is required.
+func (lt StdFilter) IsEnabled(level Level) bool {
+	return level.ID <= lt.Lvl.ID
+}
+
+// IsStacktraceEnabled returns true if the specified Level requires a stack trace.
+func (lt StdFilter) IsStacktraceEnabled(level Level) bool {
+	return level.ID <= lt.Stacktrace.ID
+}
+
+var (
+	// Panic is the highest level of severity.
+	Panic = Level{ID: 0, Name: "panic", Color: Red}
+	// Fatal designates a catastrophic error.
+	Fatal = Level{ID: 1, Name: "fatal", Color: Red}
+	// Error designates a serious but possibly recoverable error.
+	Error = Level{ID: 2, Name: "error", Color: Red}
+	// Warn designates a non-critical error.
+	Warn = Level{ID: 3, Name: "warn", Color: Yellow}
+	// Info designates information regarding application events.
+	Info = Level{ID: 4, Name: "info", Color: Cyan}
+	// Debug designates verbose information typically used for debugging.
+	Debug = Level{ID: 5, Name: "debug", Color: NoColor}
+	// Trace designates the highest verbosity of log output.
+	Trace = Level{ID: 6, Name: "trace", Color: NoColor}
+)
diff --git a/vendor/github.com/mattermost/logr/v2/formatter.go b/vendor/github.com/mattermost/logr/v2/formatter.go
new file mode 100644
index 00000000..c8bb9b70
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/formatter.go
@@ -0,0 +1,184 @@
+package logr
+
+import (
+	"bytes"
+	"io"
+	"runtime"
+	"strconv"
+)
+
+// Formatter turns a LogRec into a formatted string.
+type Formatter interface {
+	// IsStacktraceNeeded returns true if this formatter requires a stacktrace to be
+	// generated for each LogRecord. Enabling features such as the `Caller` field
+	// requires a stacktrace.
+	IsStacktraceNeeded() bool
+
+	// Format converts a log record to bytes. If buf is not nil then it will be
+	// filled with the formatted results, otherwise a new buffer will be allocated.
+	Format(rec *LogRec, level Level, buf *bytes.Buffer) (*bytes.Buffer, error)
+}
+
+const (
+	// DefTimestampFormat is the default time stamp format used by the Plain formatter and others.
+	DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00"
+
+	// TimestampMillisFormat is the format for logging milliseconds UTC.
+	TimestampMillisFormat = "Jan _2 15:04:05.000"
+)
+
+type Writer struct {
+	io.Writer
+}
+
+func (w Writer) Writes(elems ...[]byte) (int, error) {
+	var count int
+	for _, e := range elems {
+		if c, err := w.Write(e); err != nil {
+			return count + c, err
+		} else {
+			count += c
+		}
+	}
+	return count, nil
+}
+
+// DefaultFormatter is the default formatter, outputting only text with
+// no colors and a space delimiter. Use `formatters.Plain` instead.
+type DefaultFormatter struct {
+}
+
+// IsStacktraceNeeded always returns false for the default formatter since the
+// `Caller` field is not supported.
+func (p *DefaultFormatter) IsStacktraceNeeded() bool {
+	return false
+}
+
+// Format converts a log record to bytes.
+func (p *DefaultFormatter) Format(rec *LogRec, level Level, buf *bytes.Buffer) (*bytes.Buffer, error) {
+	if buf == nil {
+		buf = &bytes.Buffer{}
+	}
+	timestampFmt := DefTimestampFormat
+
+	buf.WriteString(rec.Time().Format(timestampFmt))
+	buf.Write(Space)
+
+	buf.WriteString(level.Name)
+	buf.Write(Space)
+
+	buf.WriteString(rec.Msg())
+	buf.Write(Space)
+
+	fields := rec.Fields()
+	if len(fields) > 0 {
+		if err := WriteFields(buf, fields, Space, NoColor); err != nil {
+			return nil, err
+		}
+	}
+
+	if level.Stacktrace {
+		frames := rec.StackFrames()
+		if len(frames) > 0 {
+			buf.Write(Newline)
+			if err := WriteStacktrace(buf, rec.StackFrames()); err != nil {
+				return nil, err
+			}
+		}
+	}
+	buf.Write(Newline)
+
+	return buf, nil
+}
+
+// WriteFields writes zero or more name/value pairs to the io.Writer.
+// The pairs are output in key=value format with an optional separator between fields.
+func WriteFields(w io.Writer, fields []Field, separator []byte, color Color) error {
+	ws := Writer{w}
+
+	sep := []byte{}
+	for _, field := range fields {
+		if err := writeField(ws, field, sep, color); err != nil {
+			return err
+		}
+		sep = separator
+	}
+	return nil
+}
+
+func writeField(ws Writer, field Field, sep []byte, color Color) error {
+	if len(sep) != 0 {
+		if _, err := ws.Write(sep); err != nil {
+			return err
+		}
+	}
+	if err := WriteWithColor(ws, field.Key, color); err != nil {
+		return err
+	}
+	if _, err := ws.Write(Equals); err != nil {
+		return err
+	}
+	return field.ValueString(ws, shouldQuote)
+}
+
+// shouldQuote returns true if val contains any characters that might be unsafe
+// when injecting log output into an aggregator, viewer or report.
+func shouldQuote(val string) bool {
+	for _, c := range val {
+		if !((c >= '0' && c <= '9') ||
+			(c >= 'a' && c <= 'z') ||
+			(c >= 'A' && c <= 'Z') ||
+			c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+// WriteStacktrace formats and outputs a stack trace to an io.Writer.
+func WriteStacktrace(w io.Writer, frames []runtime.Frame) error {
+	ws := Writer{w}
+	for _, frame := range frames {
+		if frame.Function != "" {
+			if _, err := ws.Writes(Space, Space, []byte(frame.Function), Newline); err != nil {
+				return err
+			}
+		}
+		if frame.File != "" {
+			s := strconv.FormatInt(int64(frame.Line), 10)
+			if _, err := ws.Writes([]byte{' ', ' ', ' ', ' ', ' ', ' '}, []byte(frame.File), Colon, []byte(s), Newline); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// WriteWithColor outputs a string with the specified ANSI color.
+func WriteWithColor(w io.Writer, s string, color Color) error {
+	var err error
+
+	writer := func(buf []byte) {
+		if err != nil {
+			return
+		}
+		_, err = w.Write(buf)
+	}
+
+	if color != NoColor {
+		writer(AnsiColorPrefix)
+		writer([]byte(strconv.FormatInt(int64(color), 10)))
+		writer(AnsiColorSuffix)
+	}
+
+	if err == nil {
+		_, err = io.WriteString(w, s)
+	}
+
+	if color != NoColor {
+		writer(AnsiColorPrefix)
+		writer([]byte(strconv.FormatInt(int64(NoColor), 10)))
+		writer(AnsiColorSuffix)
+	}
+	return err
+}
diff --git a/vendor/github.com/mattermost/logr/v2/formatters/gelf.go b/vendor/github.com/mattermost/logr/v2/formatters/gelf.go
new file mode 100644
index 00000000..9dece13c
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/formatters/gelf.go
@@ -0,0 +1,152 @@
+package formatters
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"os"
+	"strings"
+
+	"github.com/francoispqt/gojay"
+	"github.com/mattermost/logr/v2"
+)
+
+const (
+	GelfVersion      = "1.1"
+	GelfVersionKey   = "version"
+	GelfHostKey      = "host"
+	GelfShortKey     = "short_message"
+	GelfFullKey      = "full_message"
+	GelfTimestampKey = "timestamp"
+	GelfLevelKey     = "level"
+)
+
+// Gelf formats log records as GELF records (https://docs.graylog.org/en/4.0/pages/gelf.html).
+type Gelf struct {
+	// Hostname allows a custom hostname, otherwise os.Hostname is used.
+	Hostname string `json:"hostname"`
+
+	// EnableCaller enables output of the file and line number that emitted a log record.
+	EnableCaller bool `json:"enable_caller"`
+
+	// FieldSorter allows custom sorting for the context fields.
+	FieldSorter func(fields []logr.Field) []logr.Field `json:"-"`
+}
+
+func (g *Gelf) CheckValid() error {
+	return nil
+}
+
+// IsStacktraceNeeded returns true if a stacktrace is needed so we can output the `Caller` field.
+func (g *Gelf) IsStacktraceNeeded() bool {
+	return g.EnableCaller
+}
+
+// Format converts a log record to bytes in GELF format.
+func (g *Gelf) Format(rec *logr.LogRec, level logr.Level, buf *bytes.Buffer) (*bytes.Buffer, error) {
+	if buf == nil {
+		buf = &bytes.Buffer{}
+	}
+	enc := gojay.BorrowEncoder(buf)
+	defer func() {
+		enc.Release()
+	}()
+
+	gr := gelfRecord{
+		LogRec: rec,
+		Gelf:   g,
+		level:  level,
+		sorter: g.FieldSorter,
+	}
+
+	err := enc.EncodeObject(gr)
+	if err != nil {
+		return nil, err
+	}
+
+	buf.WriteByte(0)
+	return buf, nil
+}
+
+type gelfRecord struct {
+	*logr.LogRec
+	*Gelf
+	level  logr.Level
+	sorter func(fields []logr.Field) []logr.Field
+}
+
+// MarshalJSONObject encodes the LogRec as JSON.
+func (gr gelfRecord) MarshalJSONObject(enc *gojay.Encoder) {
+	enc.AddStringKey(GelfVersionKey, GelfVersion)
+	enc.AddStringKey(GelfHostKey, gr.getHostname())
+	enc.AddStringKey(GelfShortKey, gr.Msg())
+
+	if gr.level.Stacktrace {
+		frames := gr.StackFrames()
+		if len(frames) != 0 {
+			var sbuf strings.Builder
+			for _, frame := range frames {
+				fmt.Fprintf(&sbuf, "%s\n %s:%d\n", frame.Function, frame.File, frame.Line)
+			}
+			enc.AddStringKey(GelfFullKey, sbuf.String())
+		}
+	}
+
+	secs := float64(gr.Time().UTC().Unix())
+	millis := float64(gr.Time().Nanosecond() / 1000000)
+	ts := secs + (millis / 1000)
+	enc.AddFloat64Key(GelfTimestampKey, ts)
+
+	enc.AddUint32Key(GelfLevelKey, uint32(gr.level.ID))
+
+	var fields []logr.Field
+	if gr.EnableCaller {
+		caller := logr.Field{
+			Key:    "_caller",
+			Type:   logr.StringType,
+			String: gr.LogRec.Caller(),
+		}
+		fields = append(fields, caller)
+	}
+
+	fields = append(fields, gr.Fields()...)
+	if gr.sorter != nil {
+		fields = gr.sorter(fields)
+	}
+
+	if len(fields) > 0 {
+		for _, field := range fields {
+			if !strings.HasPrefix(field.Key, "_") {
+				field.Key = "_" + field.Key
+			}
+			if err := encodeField(enc, field); err != nil {
+				enc.AddStringKey(field.Key, fmt.Sprintf("<error encoding field: %v>", err))
+			}
+		}
+	}
+}
+
+// IsNil returns true if the gelf record pointer is nil.
+func (gr gelfRecord) IsNil() bool {
+	return gr.LogRec == nil
+}
+
+func (g *Gelf) getHostname() string {
+	if g.Hostname != "" {
+		return g.Hostname
+	}
+	h, err := os.Hostname()
+	if err == nil {
+		return h
+	}
+
+	// get the egress IP by fake dialing any address; UDP means no connection is actually established.
+	conn, err := net.Dial("udp", "8.8.8.8:80")
+	if err != nil {
+		return "unknown"
+	}
+	defer conn.Close()
+
+	local := conn.LocalAddr().(*net.UDPAddr)
+	return local.IP.String()
+}
diff --git a/vendor/github.com/mattermost/logr/v2/formatters/json.go b/vendor/github.com/mattermost/logr/v2/formatters/json.go
new file mode 100644
index 00000000..172b9612
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/formatters/json.go
@@ -0,0 +1,273 @@
+package formatters
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/francoispqt/gojay"
+	"github.com/mattermost/logr/v2"
+)
+
+// JSON formats log records as JSON.
+type JSON struct {
+	// DisableTimestamp disables output of timestamp field.
+	DisableTimestamp bool `json:"disable_timestamp"`
+	// DisableLevel disables output of level field.
+	DisableLevel bool `json:"disable_level"`
+	// DisableMsg disables output of msg field.
+	DisableMsg bool `json:"disable_msg"`
+	// DisableFields disables output of all fields.
+	DisableFields bool `json:"disable_fields"`
+	// DisableStacktrace disables output of stack trace.
+ DisableStacktrace bool `json:"disable_stacktrace"` + // EnableCaller enables output of the file and line number that emitted a log record. + EnableCaller bool `json:"enable_caller"` + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string `json:"timestamp_format"` + + // KeyTimestamp overrides the timestamp field key name. + KeyTimestamp string `json:"key_timestamp"` + + // KeyLevel overrides the level field key name. + KeyLevel string `json:"key_level"` + + // KeyMsg overrides the msg field key name. + KeyMsg string `json:"key_msg"` + + // KeyGroupFields when not empty will group all context fields + // under this key. + KeyGroupFields string `json:"key_group_fields"` + + // KeyStacktrace overrides the stacktrace field key name. + KeyStacktrace string `json:"key_stacktrace"` + + // KeyCaller overrides the caller field key name. + KeyCaller string `json:"key_caller"` + + // FieldSorter allows custom sorting of the fields. If nil then + // no sorting is done. + FieldSorter func(fields []logr.Field) []logr.Field `json:"-"` + + once sync.Once +} + +func (j *JSON) CheckValid() error { + return nil +} + +// IsStacktraceNeeded returns true if a stacktrace is needed so we can output the `Caller` field. +func (j *JSON) IsStacktraceNeeded() bool { + return j.EnableCaller +} + +// Format converts a log record to bytes in JSON format. +func (j *JSON) Format(rec *logr.LogRec, level logr.Level, buf *bytes.Buffer) (*bytes.Buffer, error) { + j.once.Do(j.applyDefaultKeyNames) + + if buf == nil { + buf = &bytes.Buffer{} + } + enc := gojay.BorrowEncoder(buf) + defer func() { + enc.Release() + }() + + jlr := JSONLogRec{ + LogRec: rec, + JSON: j, + level: level, + sorter: j.FieldSorter, + } + + err := enc.EncodeObject(jlr) + if err != nil { + return nil, err + } + buf.WriteByte('\n') + return buf, nil +} + +func (j *JSON) applyDefaultKeyNames() { + if j.KeyTimestamp == "" { + j.KeyTimestamp = "timestamp" + } + if j.KeyLevel == "" { + j.KeyLevel = "level" + } + if j.KeyMsg == "" { + j.KeyMsg = "msg" + } + if j.KeyStacktrace == "" { + j.KeyStacktrace = "stacktrace" + } + if j.KeyCaller == "" { + j.KeyCaller = "caller" + } +} + +// JSONLogRec decorates a LogRec adding JSON encoding. +type JSONLogRec struct { + *logr.LogRec + *JSON + level logr.Level + sorter func(fields []logr.Field) []logr.Field +} + +// MarshalJSONObject encodes the LogRec as JSON. 
+func (jlr JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) {
+	if !jlr.DisableTimestamp {
+		timestampFmt := jlr.TimestampFormat
+		if timestampFmt == "" {
+			timestampFmt = logr.DefTimestampFormat
+		}
+		time := jlr.Time()
+		enc.AddTimeKey(jlr.KeyTimestamp, &time, timestampFmt)
+	}
+	if !jlr.DisableLevel {
+		enc.AddStringKey(jlr.KeyLevel, jlr.level.Name)
+	}
+	if !jlr.DisableMsg {
+		enc.AddStringKey(jlr.KeyMsg, jlr.Msg())
+	}
+	if jlr.EnableCaller {
+		enc.AddStringKey(jlr.KeyCaller, jlr.Caller())
+	}
+	if !jlr.DisableFields {
+		fields := jlr.Fields()
+		if jlr.sorter != nil {
+			fields = jlr.sorter(fields)
+		}
+		if jlr.KeyGroupFields != "" {
+			enc.AddObjectKey(jlr.KeyGroupFields, FieldArray(fields))
+		} else {
+			if len(fields) > 0 {
+				for _, field := range fields {
+					field = jlr.prefixCollision(field)
+					if err := encodeField(enc, field); err != nil {
+						enc.AddStringKey(field.Key, fmt.Sprintf("<error encoding field: %v>", err))
+					}
+				}
+			}
+		}
+	}
+	if jlr.level.Stacktrace && !jlr.DisableStacktrace {
+		frames := jlr.StackFrames()
+		if len(frames) > 0 {
+			enc.AddArrayKey(jlr.KeyStacktrace, stackFrames(frames))
+		}
+	}
+}
+
+// IsNil returns true if the LogRec pointer is nil.
+func (rec JSONLogRec) IsNil() bool {
+	return rec.LogRec == nil
+}
+
+func (rec JSONLogRec) prefixCollision(field logr.Field) logr.Field {
+	switch field.Key {
+	case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace:
+		f := field
+		f.Key = "_" + field.Key
+		return rec.prefixCollision(f)
+	}
+	return field
+}
+
+type stackFrames []runtime.Frame
+
+// MarshalJSONArray encodes the stackFrames slice as JSON.
+func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) {
+	for _, frame := range s {
+		enc.AddObject(stackFrame(frame))
+	}
+}
+
+// IsNil returns true if stackFrames is an empty slice.
+func (s stackFrames) IsNil() bool {
+	return len(s) == 0
+}
+
+type stackFrame runtime.Frame
+
+// MarshalJSONObject encodes a stackFrame as JSON.
+func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) {
+	enc.AddStringKey("Function", f.Function)
+	enc.AddStringKey("File", f.File)
+	enc.AddIntKey("Line", f.Line)
+}
+
+func (f stackFrame) IsNil() bool {
+	return false
+}
+
+type FieldArray []logr.Field
+
+// MarshalJSONObject encodes the field array as a JSON object.
+func (fa FieldArray) MarshalJSONObject(enc *gojay.Encoder) {
+	for _, fld := range fa {
+		if err := encodeField(enc, fld); err != nil {
+			enc.AddStringKey(fld.Key, fmt.Sprintf("<error encoding field: %v>", err))
+		}
+	}
+}
+
+// IsNil returns true if the field array is nil.
+func (fa FieldArray) IsNil() bool {
+	return fa == nil
+}
+
+func encodeField(enc *gojay.Encoder, field logr.Field) error {
+	// first check if the value has a marshaler already.
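+	// Values that already know how to marshal themselves via gojay take the
+	// fast path; everything else falls through to the typed switch below,
+	// which encodes primitives directly and defers to encoding/json only
+	// for structs, arrays, maps, and unknown types.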
+ switch vt := field.Interface.(type) { + case gojay.MarshalerJSONObject: + enc.AddObjectKey(field.Key, vt) + return nil + case gojay.MarshalerJSONArray: + enc.AddArrayKey(field.Key, vt) + return nil + } + + switch field.Type { + case logr.StringType: + enc.AddStringKey(field.Key, field.String) + + case logr.BoolType: + var b bool + if field.Integer != 0 { + b = true + } + enc.AddBoolKey(field.Key, b) + + case logr.StructType, logr.ArrayType, logr.MapType, logr.UnknownType: + b, err := json.Marshal(field.Interface) + if err != nil { + return err + } + embed := gojay.EmbeddedJSON(b) + enc.AddEmbeddedJSONKey(field.Key, &embed) + + case logr.StringerType, logr.ErrorType, logr.TimestampMillisType, logr.TimeType, logr.DurationType, logr.BinaryType: + var buf strings.Builder + _ = field.ValueString(&buf, nil) + enc.AddStringKey(field.Key, buf.String()) + + case logr.Int64Type, logr.Int32Type, logr.IntType: + enc.AddInt64Key(field.Key, field.Integer) + + case logr.Uint64Type, logr.Uint32Type, logr.UintType: + enc.AddUint64Key(field.Key, uint64(field.Integer)) + + case logr.Float64Type, logr.Float32Type: + enc.AddFloat64Key(field.Key, field.Float) + + default: + return fmt.Errorf("invalid field type: %d", field.Type) + } + return nil +} diff --git a/vendor/github.com/mattermost/logr/v2/formatters/plain.go b/vendor/github.com/mattermost/logr/v2/formatters/plain.go new file mode 100644 index 00000000..4d8af643 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/formatters/plain.go @@ -0,0 +1,146 @@ +package formatters + +import ( + "bytes" + "fmt" + "strings" + + "github.com/mattermost/logr/v2" +) + +// Plain is the simplest formatter, outputting only text with +// no colors. +type Plain struct { + // DisableTimestamp disables output of timestamp field. + DisableTimestamp bool `json:"disable_timestamp"` + // DisableLevel disables output of level field. + DisableLevel bool `json:"disable_level"` + // DisableMsg disables output of msg field. + DisableMsg bool `json:"disable_msg"` + // DisableFields disables output of all fields. + DisableFields bool `json:"disable_fields"` + // DisableStacktrace disables output of stack trace. + DisableStacktrace bool `json:"disable_stacktrace"` + // EnableCaller enables output of the file and line number that emitted a log record. + EnableCaller bool `json:"enable_caller"` + + // Delim is an optional delimiter output between each log field. + // Defaults to a single space. + Delim string `json:"delim"` + + // MinLevelLen sets the minimum level name length. If the level name is less + // than the minimum it will be padded with spaces. + MinLevelLen int `json:"min_level_len"` + + // MinMessageLen sets the minimum msg length. If the msg text is less + // than the minimum it will be padded with spaces. + MinMessageLen int `json:"min_msg_len"` + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string `json:"timestamp_format"` + + // LineEnd sets the end of line character(s). Defaults to '\n'. + LineEnd string `json:"line_end"` + + // EnableColor sets whether output should include color. + EnableColor bool `json:"enable_color"` +} + +func (p *Plain) CheckValid() error { + if p.MinMessageLen < 0 || p.MinMessageLen > 1024 { + return fmt.Errorf("min_msg_len is invalid(%d)", p.MinMessageLen) + } + return nil +} + +// IsStacktraceNeeded returns true if a stacktrace is needed so we can output the `Caller` field. 
+func (p *Plain) IsStacktraceNeeded() bool { + return p.EnableCaller +} + +// Format converts a log record to bytes. +func (p *Plain) Format(rec *logr.LogRec, level logr.Level, buf *bytes.Buffer) (*bytes.Buffer, error) { + delim := p.Delim + if delim == "" { + delim = " " + } + if buf == nil { + buf = &bytes.Buffer{} + } + + timestampFmt := p.TimestampFormat + if timestampFmt == "" { + timestampFmt = logr.DefTimestampFormat + } + + color := logr.NoColor + if p.EnableColor { + color = level.Color + } + + if !p.DisableLevel { + _ = logr.WriteWithColor(buf, level.Name, color) + count := len(level.Name) + if p.MinLevelLen > count { + _, _ = buf.WriteString(strings.Repeat(" ", p.MinLevelLen-count)) + } + buf.WriteString(delim) + } + + if !p.DisableTimestamp { + var arr [128]byte + tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt) + buf.WriteByte('[') + buf.Write(tbuf) + buf.WriteByte(']') + buf.WriteString(delim) + } + + if !p.DisableMsg { + count, _ := buf.WriteString(rec.Msg()) + if p.MinMessageLen > count { + _, _ = buf.WriteString(strings.Repeat(" ", p.MinMessageLen-count)) + } + _, _ = buf.WriteString(delim) + } + + var fields []logr.Field + + if p.EnableCaller { + fld := logr.Field{ + Key: "caller", + Type: logr.StringType, + String: rec.Caller(), + } + fields = append(fields, fld) + } + + if !p.DisableFields { + fields = append(fields, rec.Fields()...) + } + + if len(fields) > 0 { + if err := logr.WriteFields(buf, fields, logr.Space, color); err != nil { + return nil, err + } + } + + if level.Stacktrace && !p.DisableStacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + buf.WriteString("\n") + if err := logr.WriteStacktrace(buf, rec.StackFrames()); err != nil { + return nil, err + } + } + } + + if p.LineEnd == "" { + buf.WriteString("\n") + } else { + buf.WriteString(p.LineEnd) + } + + return buf, nil +} diff --git a/vendor/github.com/mattermost/logr/v2/level.go b/vendor/github.com/mattermost/logr/v2/level.go new file mode 100644 index 00000000..643d68e3 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/level.go @@ -0,0 +1,34 @@ +package logr + +var AnsiColorPrefix = []byte("\u001b[") +var AnsiColorSuffix = []byte("m") + +// Color for formatters that support color output. +type Color uint8 + +const ( + NoColor Color = 0 + Red Color = 31 + Green Color = 32 + Yellow Color = 33 + Blue Color = 34 + Magenta Color = 35 + Cyan Color = 36 + White Color = 37 +) + +// LevelID is the unique id of each level. +type LevelID uint + +// Level provides a mechanism to enable/disable specific log lines. +type Level struct { + ID LevelID `json:"id"` + Name string `json:"name"` + Stacktrace bool `json:"stacktrace,omitempty"` + Color Color `json:"color,omitempty"` +} + +// String returns the name of this level. +func (level Level) String() string { + return level.Name +} diff --git a/vendor/github.com/mattermost/logr/levelcache.go b/vendor/github.com/mattermost/logr/v2/levelcache.go similarity index 100% rename from vendor/github.com/mattermost/logr/levelcache.go rename to vendor/github.com/mattermost/logr/v2/levelcache.go diff --git a/vendor/github.com/mattermost/logr/v2/logger.go b/vendor/github.com/mattermost/logr/v2/logger.go new file mode 100644 index 00000000..6ce9c9f0 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/logger.go @@ -0,0 +1,99 @@ +package logr + +import "log" + +// Logger provides context for logging via fields. +type Logger struct { + lgr *Logr + fields []Field +} + +// Logr returns the `Logr` instance that created this `Logger`. 
+func (logger Logger) Logr() *Logr { + return logger.lgr +} + +// With creates a new `Logger` with any existing fields plus the new ones. +func (logger Logger) With(fields ...Field) Logger { + l := Logger{lgr: logger.lgr} + size := len(logger.fields) + len(fields) + if size > 0 { + l.fields = make([]Field, 0, size) + l.fields = append(l.fields, logger.fields...) + l.fields = append(l.fields, fields...) + } + return l +} + +// StdLogger creates a standard logger backed by this `Logr.Logger` instance. +// All log records are emitted with the specified log level. +func (logger Logger) StdLogger(level Level) *log.Logger { + return NewStdLogger(level, logger) +} + +// IsLevelEnabled determines if the specified level is enabled for at least +// one log target. +func (logger Logger) IsLevelEnabled(level Level) bool { + status := logger.Logr().IsLevelEnabled(level) + return status.Enabled +} + +// Sugar creates a new `Logger` with a less structured API. Any fields are preserved. +func (logger Logger) Sugar(fields ...Field) Sugar { + return Sugar{ + logger: logger.With(fields...), + } +} + +// Log checks that the level matches one or more targets, and +// if so, generates a log record that is added to the Logr queue. +// Arguments are handled in the manner of fmt.Print. +func (logger Logger) Log(lvl Level, msg string, fields ...Field) { + status := logger.lgr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, msg, fields, status.Stacktrace) + logger.lgr.enqueue(rec) + } +} + +// LogM calls `Log` multiple times, one for each level provided. +func (logger Logger) LogM(levels []Level, msg string, fields ...Field) { + for _, lvl := range levels { + logger.Log(lvl, msg, fields...) + } +} + +// Trace is a convenience method equivalent to `Log(TraceLevel, msg, fields...)`. +func (logger Logger) Trace(msg string, fields ...Field) { + logger.Log(Trace, msg, fields...) +} + +// Debug is a convenience method equivalent to `Log(DebugLevel, msg, fields...)`. +func (logger Logger) Debug(msg string, fields ...Field) { + logger.Log(Debug, msg, fields...) +} + +// Info is a convenience method equivalent to `Log(InfoLevel, msg, fields...)`. +func (logger Logger) Info(msg string, fields ...Field) { + logger.Log(Info, msg, fields...) +} + +// Warn is a convenience method equivalent to `Log(WarnLevel, msg, fields...)`. +func (logger Logger) Warn(msg string, fields ...Field) { + logger.Log(Warn, msg, fields...) +} + +// Error is a convenience method equivalent to `Log(ErrorLevel, msg, fields...)`. +func (logger Logger) Error(msg string, fields ...Field) { + logger.Log(Error, msg, fields...) +} + +// Fatal is a convenience method equivalent to `Log(FatalLevel, msg, fields...)` +func (logger Logger) Fatal(msg string, fields ...Field) { + logger.Log(Fatal, msg, fields...) +} + +// Panic is a convenience method equivalent to `Log(PanicLevel, msg, fields...)` +func (logger Logger) Panic(msg string, fields ...Field) { + logger.Log(Panic, msg, fields...) +} diff --git a/vendor/github.com/mattermost/logr/v2/logr.go b/vendor/github.com/mattermost/logr/v2/logr.go new file mode 100644 index 00000000..82b2a835 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/logr.go @@ -0,0 +1,471 @@ +package logr + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "sync" + "sync/atomic" + "time" + + "github.com/wiggin77/merror" +) + +// Logr maintains a list of log targets and accepts incoming +// log records. Use `New` to create instances. 
+type Logr struct { + tmux sync.RWMutex // targetHosts mutex + targetHosts []*TargetHost + + in chan *LogRec + quit chan struct{} // closed by Shutdown to exit read loop + done chan struct{} // closed when read loop exited + lvlCache levelCache + bufferPool sync.Pool + options *options + + metricsMux sync.RWMutex + metrics *metrics + + shutdown int32 +} + +// New creates a new Logr instance with one or more options specified. +// Some options with invalid values can cause an error to be returned, +// however `logr.New()` using just defaults never errors. +func New(opts ...Option) (*Logr, error) { + options := &options{ + maxQueueSize: DefaultMaxQueueSize, + enqueueTimeout: DefaultEnqueueTimeout, + shutdownTimeout: DefaultShutdownTimeout, + flushTimeout: DefaultFlushTimeout, + maxPooledBuffer: DefaultMaxPooledBuffer, + } + + lgr := &Logr{options: options} + + // apply the options + for _, opt := range opts { + if err := opt(lgr); err != nil { + return nil, err + } + } + pkgName := GetLogrPackageName() + if pkgName != "" { + opt := StackFilter(pkgName, pkgName+"/targets", pkgName+"/formatters") + _ = opt(lgr) + } + + lgr.in = make(chan *LogRec, lgr.options.maxQueueSize) + lgr.quit = make(chan struct{}) + lgr.done = make(chan struct{}) + + if lgr.options.useSyncMapLevelCache { + lgr.lvlCache = &syncMapLevelCache{} + } else { + lgr.lvlCache = &arrayLevelCache{} + } + lgr.lvlCache.setup() + + lgr.bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + lgr.initMetrics(lgr.options.metricsCollector, lgr.options.metricsUpdateFreqMillis) + + go lgr.start() + + return lgr, nil +} + +// AddTarget adds a target to the logger which will receive +// log records for outputting. +func (lgr *Logr) AddTarget(target Target, name string, filter Filter, formatter Formatter, maxQueueSize int) error { + if lgr.IsShutdown() { + return fmt.Errorf("AddTarget called after Logr shut down") + } + + lgr.metricsMux.RLock() + metrics := lgr.metrics + lgr.metricsMux.RUnlock() + + hostOpts := targetHostOptions{ + name: name, + filter: filter, + formatter: formatter, + maxQueueSize: maxQueueSize, + metrics: metrics, + } + + host, err := newTargetHost(target, hostOpts) + if err != nil { + return err + } + + lgr.tmux.Lock() + defer lgr.tmux.Unlock() + + lgr.targetHosts = append(lgr.targetHosts, host) + + lgr.ResetLevelCache() + + return nil +} + +// NewLogger creates a Logger using defaults. A `Logger` is light-weight +// enough to create on-demand, but typically one or more Loggers are +// created and re-used. +func (lgr *Logr) NewLogger() Logger { + logger := Logger{lgr: lgr} + return logger +} + +var levelStatusDisabled = LevelStatus{} + +// IsLevelEnabled returns true if at least one target has the specified +// level enabled. The result is cached so that subsequent checks are fast. +func (lgr *Logr) IsLevelEnabled(lvl Level) LevelStatus { + // No levels enabled after shutdown + if atomic.LoadInt32(&lgr.shutdown) != 0 { + return levelStatusDisabled + } + + // Check cache. + status, ok := lgr.lvlCache.get(lvl.ID) + if ok { + return status + } + + status = LevelStatus{} + + // Cache miss; check each target. 
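+	// Scan every target host: the level is enabled if any host enables it,
+	// and a stacktrace is captured if any enabled host's filter or formatter
+	// requires one. The combined result is cached below so subsequent checks
+	// for this level cost a single cache lookup.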
+ lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + for _, host := range lgr.targetHosts { + enabled, level := host.IsLevelEnabled(lvl) + if enabled { + status.Enabled = true + if level.Stacktrace || host.formatter.IsStacktraceNeeded() { + status.Stacktrace = true + break // if both level and stacktrace enabled then no sense checking more targets + } + } + } + + // Cache and return the result. + if err := lgr.lvlCache.put(lvl.ID, status); err != nil { + lgr.ReportError(err) + return LevelStatus{} + } + return status +} + +// HasTargets returns true only if at least one target exists within the lgr. +func (lgr *Logr) HasTargets() bool { + lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + return len(lgr.targetHosts) > 0 +} + +// TargetInfo provides name and type for a Target. +type TargetInfo struct { + Name string + Type string +} + +// TargetInfos enumerates all the targets added to this lgr. +// The resulting slice represents a snapshot at time of calling. +func (lgr *Logr) TargetInfos() []TargetInfo { + infos := make([]TargetInfo, 0) + + lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + + for _, host := range lgr.targetHosts { + inf := TargetInfo{ + Name: host.String(), + Type: fmt.Sprintf("%T", host.target), + } + infos = append(infos, inf) + } + return infos +} + +// RemoveTargets safely removes one or more targets based on the filtering method. +// f should return true to delete the target, false to keep it. +// When removing a target, best effort is made to write any queued log records before +// closing, with cxt determining how much time can be spent in total. +// Note, keep the timeout short since this method blocks certain logging operations. +func (lgr *Logr) RemoveTargets(cxt context.Context, f func(ti TargetInfo) bool) error { + errs := merror.New() + hosts := make([]*TargetHost, 0) + + lgr.tmux.Lock() + defer lgr.tmux.Unlock() + + for _, host := range lgr.targetHosts { + inf := TargetInfo{ + Name: host.String(), + Type: fmt.Sprintf("%T", host.target), + } + if f(inf) { + if err := host.Shutdown(cxt); err != nil { + errs.Append(err) + } + } else { + hosts = append(hosts, host) + } + } + + lgr.targetHosts = hosts + lgr.ResetLevelCache() + + return errs.ErrorOrNil() +} + +// ResetLevelCache resets the cached results of `IsLevelEnabled`. This is +// called any time a Target is added or a target's level is changed. +func (lgr *Logr) ResetLevelCache() { + lgr.lvlCache.clear() +} + +// SetMetricsCollector sets (or resets) the metrics collector to be used for gathering +// metrics for all targets. Only targets added after this call will use the collector. +// +// To ensure all targets use a collector, use the `SetMetricsCollector` option when +// creating the Logr instead, or configure/reconfigure the Logr after calling this method. +func (lgr *Logr) SetMetricsCollector(collector MetricsCollector, updateFreqMillis int64) { + lgr.initMetrics(collector, updateFreqMillis) +} + +// enqueue adds a log record to the logr queue. If the queue is full then +// this function either blocks or the log record is dropped, depending on +// the result of calling `OnQueueFull`. 
+func (lgr *Logr) enqueue(rec *LogRec) {
+	select {
+	case lgr.in <- rec:
+	default:
+		if lgr.options.onQueueFull != nil && lgr.options.onQueueFull(rec, cap(lgr.in)) {
+			return // drop the record
+		}
+		select {
+		case <-time.After(lgr.options.enqueueTimeout):
+			lgr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec))
+		case lgr.in <- rec: // block until success or timeout
+		}
+	}
+}
+
+// Flush blocks while flushing the logr queue and all target queues, by
+// writing existing log records to valid targets.
+// Any attempts to add new log records will block until flush is complete.
+// `logr.FlushTimeout` determines how long flush can execute before
+// timing out. Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (lgr *Logr) Flush() error {
+	ctx, cancel := context.WithTimeout(context.Background(), lgr.options.flushTimeout)
+	defer cancel()
+	return lgr.FlushWithTimeout(ctx)
+}
+
+// FlushWithTimeout blocks while flushing the logr queue and all target queues, by
+// writing existing log records to valid targets.
+// Any attempts to add new log records will block until flush is complete.
+// Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (lgr *Logr) FlushWithTimeout(ctx context.Context) error {
+	if !lgr.HasTargets() {
+		return nil
+	}
+
+	if lgr.IsShutdown() {
+		return errors.New("Flush called on shut down Logr")
+	}
+
+	rec := newFlushLogRec(lgr.NewLogger())
+	lgr.enqueue(rec)
+
+	select {
+	case <-ctx.Done():
+		return newTimeoutError("logr queue flush timeout")
+	case <-rec.flush:
+	}
+	return nil
+}
+
+// IsShutdown returns true if this Logr instance has been shut down.
+// No further log records can be enqueued and no targets added after
+// shutdown.
+func (lgr *Logr) IsShutdown() bool {
+	return atomic.LoadInt32(&lgr.shutdown) != 0
+}
+
+// Shutdown cleanly stops the logging engine after making best efforts
+// to flush all targets. Call this function right before application
+// exit - logr cannot be restarted once shut down.
+// `logr.ShutdownTimeout` determines how long shutdown can execute before
+// timing out. Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (lgr *Logr) Shutdown() error {
+	ctx, cancel := context.WithTimeout(context.Background(), lgr.options.shutdownTimeout)
+	defer cancel()
+	return lgr.ShutdownWithTimeout(ctx)
+}
+
+// ShutdownWithTimeout cleanly stops the logging engine after making best efforts
+// to flush all targets. Call this function right before application
+// exit - logr cannot be restarted once shut down.
+// Use `IsTimeoutError` to determine if the returned error is due to a
+// timeout.
+func (lgr *Logr) ShutdownWithTimeout(ctx context.Context) error {
+	if err := lgr.FlushWithTimeout(ctx); err != nil {
+		return err
+	}
+
+	if atomic.SwapInt32(&lgr.shutdown, 1) != 0 {
+		return errors.New("Shutdown called again after shut down")
+	}
+
+	lgr.ResetLevelCache()
+	lgr.stopMetricsUpdater()
+
+	close(lgr.quit)
+
+	errs := merror.New()
+
+	// Wait for read loop to exit
+	select {
+	case <-ctx.Done():
+		errs.Append(newTimeoutError("logr queue shutdown timeout"))
+	case <-lgr.done:
+	}
+
+	// logr.in channel should now be drained to targets and no more log records
+	// can be added.
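+	// Shut down each target host in turn, accumulating any errors into a
+	// single merror so that every target gets a chance to flush and close
+	// even if an earlier one fails.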
+ lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + for _, host := range lgr.targetHosts { + err := host.Shutdown(ctx) + if err != nil { + errs.Append(err) + } + } + return errs.ErrorOrNil() +} + +// ReportError is used to notify the host application of any internal logging errors. +// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is +// output to `os.Stderr`. +func (lgr *Logr) ReportError(err interface{}) { + lgr.incErrorCounter() + + if lgr.options.onLoggerError == nil { + fmt.Fprintln(os.Stderr, err) + return + } + lgr.options.onLoggerError(fmt.Errorf("%v", err)) +} + +// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection. +func (lgr *Logr) BorrowBuffer() *bytes.Buffer { + if lgr.options.disableBufferPool { + return &bytes.Buffer{} + } + return lgr.bufferPool.Get().(*bytes.Buffer) +} + +// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only +// retained if less than MaxPooledBuffer. +func (lgr *Logr) ReleaseBuffer(buf *bytes.Buffer) { + if !lgr.options.disableBufferPool && buf.Cap() < lgr.options.maxPooledBuffer { + buf.Reset() + lgr.bufferPool.Put(buf) + } +} + +// start selects on incoming log records until shutdown record is received. +// Incoming log records are fanned out to all log targets. +func (lgr *Logr) start() { + defer func() { + if r := recover(); r != nil { + lgr.ReportError(r) + go lgr.start() + } else { + close(lgr.done) + } + }() + + for { + var rec *LogRec + select { + case rec = <-lgr.in: + if rec.flush != nil { + lgr.flush(rec.flush) + } else { + rec.prep() + lgr.fanout(rec) + } + case <-lgr.quit: + return + } + } +} + +// fanout pushes a LogRec to all targets. +func (lgr *Logr) fanout(rec *LogRec) { + var host *TargetHost + defer func() { + if r := recover(); r != nil { + lgr.ReportError(fmt.Errorf("fanout failed for target %s, %v", host.String(), r)) + } + }() + + var logged bool + + lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + for _, host = range lgr.targetHosts { + if enabled, _ := host.IsLevelEnabled(rec.Level()); enabled { + host.Log(rec) + logged = true + } + } + + if logged { + lgr.incLoggedCounter() + } +} + +// flush drains the queue and notifies when done. +func (lgr *Logr) flush(done chan<- struct{}) { + // first drain the logr queue. +loop: + for { + var rec *LogRec + select { + case rec = <-lgr.in: + if rec.flush == nil { + rec.prep() + lgr.fanout(rec) + } + default: + break loop + } + } + + logger := lgr.NewLogger() + + // drain all the targets; block until finished. + lgr.tmux.RLock() + defer lgr.tmux.RUnlock() + for _, host := range lgr.targetHosts { + rec := newFlushLogRec(logger) + host.Log(rec) + <-rec.flush + } + done <- struct{}{} +} diff --git a/vendor/github.com/mattermost/logr/logrec.go b/vendor/github.com/mattermost/logr/v2/logrec.go similarity index 60% rename from vendor/github.com/mattermost/logr/logrec.go rename to vendor/github.com/mattermost/logr/v2/logrec.go index 9428aaec..76d51b9e 100644 --- a/vendor/github.com/mattermost/logr/logrec.go +++ b/vendor/github.com/mattermost/logr/v2/logrec.go @@ -2,24 +2,13 @@ package logr import ( "fmt" + "path/filepath" "runtime" "strings" "sync" "time" ) -var ( - logrPkg string -) - -func init() { - // Calc current package name - pcs := make([]uintptr, 2) - _ = runtime.Callers(0, pcs) - tmp := runtime.FuncForPC(pcs[1]).Name() - logrPkg = getPackageName(tmp) -} - // LogRec collects raw, unformatted data to be logged. // TODO: pool these? 
how to reliably know when targets are done with them? Copy for each target? type LogRec struct { @@ -29,9 +18,9 @@ type LogRec struct { level Level logger Logger - template string - newline bool - args []interface{} + msg string + newline bool + fields []Field stackPC []uintptr stackCount int @@ -40,13 +29,14 @@ type LogRec struct { flush chan struct{} // remaining fields calculated by `prep` - msg string - frames []runtime.Frame + frames []runtime.Frame + fieldsAll []Field + caller string } // NewLogRec creates a new LogRec with the current time and optional stack trace. -func NewLogRec(lvl Level, logger Logger, template string, args []interface{}, incStacktrace bool) *LogRec { - rec := &LogRec{time: time.Now(), logger: logger, level: lvl, template: template, args: args} +func NewLogRec(lvl Level, logger Logger, msg string, fields []Field, incStacktrace bool) *LogRec { + rec := &LogRec{time: time.Now(), logger: logger, level: lvl, msg: msg, fields: fields} if incStacktrace { rec.stackPC = make([]uintptr, DefaultMaxStackFrames) rec.stackCount = runtime.Callers(2, rec.stackPC) @@ -60,44 +50,40 @@ func newFlushLogRec(logger Logger) *LogRec { return &LogRec{logger: logger, flush: make(chan struct{})} } -// prep resolves all args and field values to strings, and -// resolves stack trace to frames. +// prep resolves stack trace to frames. func (rec *LogRec) prep() { rec.mux.Lock() defer rec.mux.Unlock() - // resolve args - if rec.template == "" { - if rec.newline { - rec.msg = fmt.Sprintln(rec.args...) - } else { - rec.msg = fmt.Sprint(rec.args...) - } - } else { - rec.msg = fmt.Sprintf(rec.template, rec.args...) - } + // include log rec fields and logger fields added via "With" + rec.fieldsAll = make([]Field, 0, len(rec.fields)+len(rec.logger.fields)) + rec.fieldsAll = append(rec.fieldsAll, rec.logger.fields...) + rec.fieldsAll = append(rec.fieldsAll, rec.fields...) + + filter := rec.logger.lgr.options.stackFilter // resolve stack trace if rec.stackCount > 0 { + rec.frames = make([]runtime.Frame, 0, rec.stackCount) frames := runtime.CallersFrames(rec.stackPC[:rec.stackCount]) for { - f, more := frames.Next() - rec.frames = append(rec.frames, f) - if !more { - break + frame, more := frames.Next() + + // remove all package entries that are in filter. + pkg := ResolvePackageName(frame.Function) + if _, ok := filter[pkg]; !ok && pkg != "" { + rec.frames = append(rec.frames, frame) } - } - // remove leading logr package entries. - var start int - for i, frame := range rec.frames { - pkg := getPackageName(frame.Function) - if pkg != "" && pkg != logrPkg { - start = i + if !more { break } } - rec.frames = rec.frames[start:] + } + + // calc caller if stack trace provided + if len(rec.frames) > 0 { + rec.caller = calcCaller(rec.frames) } } @@ -112,10 +98,9 @@ func (rec *LogRec) WithTime(time time.Time) *LogRec { time: time, level: rec.level, logger: rec.logger, - template: rec.template, - newline: rec.newline, - args: rec.args, msg: rec.msg, + newline: rec.newline, + fields: rec.fields, stackPC: rec.stackPC, stackCount: rec.stackCount, frames: rec.frames, @@ -140,9 +125,9 @@ func (rec *LogRec) Level() Level { } // Fields returns this log record's Fields. -func (rec *LogRec) Fields() Fields { +func (rec *LogRec) Fields() []Field { // no locking needed as this field is not mutated. - return rec.logger.fields + return rec.fieldsAll } // Msg returns this log record's message text. 
@@ -160,6 +145,15 @@ func (rec *LogRec) StackFrames() []runtime.Frame { return rec.frames } +// Caller returns this log record's caller info, meaning the file and line +// number where this log record was emitted. Returns empty string if no +// stack trace was provided. +func (rec *LogRec) Caller() string { + rec.mux.RLock() + defer rec.mux.RUnlock() + return rec.caller +} + // String returns a string representation of this log record. func (rec *LogRec) String() string { if rec.flush != nil { @@ -167,23 +161,22 @@ func (rec *LogRec) String() string { } f := &DefaultFormatter{} - buf := rec.logger.logr.BorrowBuffer() - defer rec.logger.logr.ReleaseBuffer(buf) - buf, _ = f.Format(rec, true, buf) + buf := rec.logger.lgr.BorrowBuffer() + defer rec.logger.lgr.ReleaseBuffer(buf) + buf, _ = f.Format(rec, rec.Level(), buf) return strings.TrimSpace(buf.String()) } -// getPackageName reduces a fully qualified function name to the package name -// By sirupsen: https://github.com/sirupsen/logrus/blob/master/entry.go -func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break +func calcCaller(frames []runtime.Frame) string { + for _, frame := range frames { + if frame.File == "" { + continue } + + dir, file := filepath.Split(frame.File) + base := filepath.Base(dir) + + return fmt.Sprintf("%s/%s:%d", base, file, frame.Line) } - return f + return "" } diff --git a/vendor/github.com/mattermost/logr/v2/metrics.go b/vendor/github.com/mattermost/logr/v2/metrics.go new file mode 100644 index 00000000..f4f4d67f --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/metrics.go @@ -0,0 +1,140 @@ +package logr + +import "time" + +const ( + DefMetricsUpdateFreqMillis = 15000 // 15 seconds +) + +// Counter is a simple metrics sink that can only increment a value. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Counter interface { + // Inc increments the counter by 1. Use Add to increment it by arbitrary non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < 0. + Add(float64) +} + +// Gauge is a simple metrics sink that can receive values and increase or decrease. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Gauge interface { + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Add adds the given value to the Gauge. (The value can be negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// MetricsCollector provides a way for users of this Logr package to have metrics pushed +// in an efficient way to any backend, e.g. Prometheus. +// For each target added to Logr, the supplied MetricsCollector will provide a Gauge +// and Counters that will be called frequently as logging occurs. +type MetricsCollector interface { + // QueueSizeGauge returns a Gauge that will be updated by the named target. + QueueSizeGauge(target string) (Gauge, error) + // LoggedCounter returns a Counter that will be incremented by the named target. + LoggedCounter(target string) (Counter, error) + // ErrorCounter returns a Counter that will be incremented by the named target. + ErrorCounter(target string) (Counter, error) + // DroppedCounter returns a Counter that will be incremented by the named target. 
+	DroppedCounter(target string) (Counter, error)
+	// BlockedCounter returns a Counter that will be incremented by the named target.
+	BlockedCounter(target string) (Counter, error)
+}
+
+// TargetWithMetrics is a target that provides metrics.
+type TargetWithMetrics interface {
+	EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error
+}
+
+type metrics struct {
+	collector        MetricsCollector
+	updateFreqMillis int64
+	queueSizeGauge   Gauge
+	loggedCounter    Counter
+	errorCounter     Counter
+	done             chan struct{}
+}
+
+// initMetrics initializes metrics collection.
+func (lgr *Logr) initMetrics(collector MetricsCollector, updatefreq int64) {
+	lgr.stopMetricsUpdater()
+
+	if collector == nil {
+		lgr.metricsMux.Lock()
+		lgr.metrics = nil
+		lgr.metricsMux.Unlock()
+		return
+	}
+
+	metrics := &metrics{
+		collector:        collector,
+		updateFreqMillis: updatefreq,
+		done:             make(chan struct{}),
+	}
+	metrics.queueSizeGauge, _ = collector.QueueSizeGauge("_logr")
+	metrics.loggedCounter, _ = collector.LoggedCounter("_logr")
+	metrics.errorCounter, _ = collector.ErrorCounter("_logr")
+
+	lgr.metricsMux.Lock()
+	lgr.metrics = metrics
+	lgr.metricsMux.Unlock()
+
+	go lgr.startMetricsUpdater()
+}
+
+func (lgr *Logr) setQueueSizeGauge(val float64) {
+	lgr.metricsMux.RLock()
+	defer lgr.metricsMux.RUnlock()
+
+	if lgr.metrics != nil {
+		lgr.metrics.queueSizeGauge.Set(val)
+	}
+}
+
+func (lgr *Logr) incLoggedCounter() {
+	lgr.metricsMux.RLock()
+	defer lgr.metricsMux.RUnlock()
+
+	if lgr.metrics != nil {
+		lgr.metrics.loggedCounter.Inc()
+	}
+}
+
+func (lgr *Logr) incErrorCounter() {
+	lgr.metricsMux.RLock()
+	defer lgr.metricsMux.RUnlock()
+
+	if lgr.metrics != nil {
+		lgr.metrics.errorCounter.Inc()
+	}
+}
+
+// startMetricsUpdater updates the metrics for any polled values every `updateFreqMillis`
+// milliseconds until logr is closed.
+func (lgr *Logr) startMetricsUpdater() {
+	for {
+		lgr.metricsMux.RLock()
+		metrics := lgr.metrics
+		c := metrics.done
+		lgr.metricsMux.RUnlock()
+
+		select {
+		case <-c:
+			return
+		case <-time.After(time.Duration(metrics.updateFreqMillis) * time.Millisecond):
+			lgr.setQueueSizeGauge(float64(len(lgr.in)))
+		}
+	}
+}
+
+func (lgr *Logr) stopMetricsUpdater() {
+	lgr.metricsMux.Lock()
+	defer lgr.metricsMux.Unlock()
+
+	if lgr.metrics != nil && lgr.metrics.done != nil {
+		close(lgr.metrics.done)
+		lgr.metrics.done = nil
+	}
+}
diff --git a/vendor/github.com/mattermost/logr/v2/options.go b/vendor/github.com/mattermost/logr/v2/options.go
new file mode 100644
index 00000000..638f638a
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/options.go
@@ -0,0 +1,192 @@
+package logr
+
+import (
+	"errors"
+	"time"
+)
+
+type Option func(*Logr) error
+
+type options struct {
+	maxQueueSize            int
+	onLoggerError           func(error)
+	onQueueFull             func(rec *LogRec, maxQueueSize int) bool
+	onTargetQueueFull       func(target Target, rec *LogRec, maxQueueSize int) bool
+	onExit                  func(code int)
+	onPanic                 func(err interface{})
+	enqueueTimeout          time.Duration
+	shutdownTimeout         time.Duration
+	flushTimeout            time.Duration
+	useSyncMapLevelCache    bool
+	maxPooledBuffer         int
+	disableBufferPool       bool
+	metricsCollector        MetricsCollector
+	metricsUpdateFreqMillis int64
+	stackFilter             map[string]struct{}
+}
+
+// MaxQueueSize is the maximum number of log records that can be queued.
+// If exceeded, `OnQueueFull` is called which determines if the log
+// record will be dropped or block until add is successful.
+// Defaults to DefaultMaxQueueSize.
+func MaxQueueSize(size int) Option {
+	return func(l *Logr) error {
+		if size < 0 {
+			return errors.New("size cannot be less than zero")
+		}
+		l.options.maxQueueSize = size
+		return nil
+	}
+}
+
+// OnLoggerError, when not nil, is called any time an internal
+// logging error occurs. For example, this can happen when a
+// target cannot connect to its data sink.
+func OnLoggerError(f func(error)) Option {
+	return func(l *Logr) error {
+		l.options.onLoggerError = f
+		return nil
+	}
+}
+
+// OnQueueFull, when not nil, is called on an attempt to add
+// a log record to a full Logr queue.
+// `MaxQueueSize` can be used to modify the maximum queue size.
+// This function should return quickly, with a bool indicating whether
+// the log record should be dropped (true) or block until the log record
+// is successfully added (false). If nil then blocking (false) is assumed.
+func OnQueueFull(f func(rec *LogRec, maxQueueSize int) bool) Option {
+	return func(l *Logr) error {
+		l.options.onQueueFull = f
+		return nil
+	}
+}
+
+// OnTargetQueueFull, when not nil, is called on an attempt to add
+// a log record to a full target queue provided the target supports reporting
+// this condition.
+// This function should return quickly, with a bool indicating whether
+// the log record should be dropped (true) or block until the log record
+// is successfully added (false). If nil then blocking (false) is assumed.
+func OnTargetQueueFull(f func(target Target, rec *LogRec, maxQueueSize int) bool) Option {
+	return func(l *Logr) error {
+		l.options.onTargetQueueFull = f
+		return nil
+	}
+}
+
+// OnExit, when not nil, is called when a FatalXXX style log API is called.
+// When nil, then the default behavior is to cleanly shut down this Logr and
+// call `os.Exit(code)`.
+func OnExit(f func(code int)) Option {
+	return func(l *Logr) error {
+		l.options.onExit = f
+		return nil
+	}
+}
+
+// OnPanic, when not nil, is called when a PanicXXX style log API is called.
+// When nil, then the default behavior is to cleanly shut down this Logr and
+// call `panic(err)`.
+func OnPanic(f func(err interface{})) Option {
+	return func(l *Logr) error {
+		l.options.onPanic = f
+		return nil
+	}
+}
+
+// EnqueueTimeout is the amount of time a log record can take to be queued.
+// This only applies to blocking enqueues, which happen after `logr.OnQueueFull`
+// is called and returns false.
+func EnqueueTimeout(dur time.Duration) Option {
+	return func(l *Logr) error {
+		l.options.enqueueTimeout = dur
+		return nil
+	}
+}
+
+// ShutdownTimeout is the amount of time `logr.Shutdown` can execute before
+// timing out. An alternative is to use `logr.ShutdownWithTimeout` and supply
+// a context.
+func ShutdownTimeout(dur time.Duration) Option {
+	return func(l *Logr) error {
+		l.options.shutdownTimeout = dur
+		return nil
+	}
+}
+
+// FlushTimeout is the amount of time `logr.Flush` can execute before
+// timing out. An alternative is to use `logr.FlushWithTimeout` and supply
+// a context.
+func FlushTimeout(dur time.Duration) Option {
+	return func(l *Logr) error {
+		l.options.flushTimeout = dur
+		return nil
+	}
+}
+
+// UseSyncMapLevelCache can be set to true when high concurrency (e.g. >32 cores)
+// is expected. This may improve performance with large numbers of cores - benchmark
+// for your use case.
+func UseSyncMapLevelCache(use bool) Option {
+	return func(l *Logr) error {
+		l.options.useSyncMapLevelCache = use
+		return nil
+	}
+}
+
+// MaxPooledBufferSize determines the maximum size of a buffer that can be
+// pooled. To reduce allocations, the buffers needed during formatting (etc.)
+// are pooled. A very large log item will grow a buffer that could stay in
+// memory indefinitely. This setting lets you control how big a pooled buffer
+// can be - anything larger will be garbage collected after use.
+// Defaults to 1MB.
+func MaxPooledBufferSize(size int) Option {
+	return func(l *Logr) error {
+		l.options.maxPooledBuffer = size
+		return nil
+	}
+}
+
+// DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer.
+func DisableBufferPool(disable bool) Option {
+	return func(l *Logr) error {
+		l.options.disableBufferPool = disable
+		return nil
+	}
+}
+
+// SetMetricsCollector enables metrics collection by supplying a MetricsCollector.
+// The MetricsCollector provides counters and gauges that are updated by log targets.
+// `updateFreqMillis` determines how often polled metrics are updated. Defaults to 15000 (15 seconds)
+// and must be at least 250 so we don't peg the CPU.
+func SetMetricsCollector(collector MetricsCollector, updateFreqMillis int64) Option {
+	return func(l *Logr) error {
+		if collector == nil {
+			return errors.New("collector cannot be nil")
+		}
+		if updateFreqMillis < 250 {
+			return errors.New("updateFreqMillis cannot be less than 250")
+		}
+		l.options.metricsCollector = collector
+		l.options.metricsUpdateFreqMillis = updateFreqMillis
+		return nil
+	}
+}
+
+// StackFilter provides a list of package names to exclude from the top of
+// stack traces. The Logr packages are automatically filtered.
+func StackFilter(pkg ...string) Option {
+	return func(l *Logr) error {
+		if l.options.stackFilter == nil {
+			l.options.stackFilter = make(map[string]struct{})
+		}
+
+		for _, p := range pkg {
+			if p != "" {
+				l.options.stackFilter[p] = struct{}{}
+			}
+		}
+		return nil
+	}
+}
diff --git a/vendor/github.com/mattermost/logr/v2/pkg.go b/vendor/github.com/mattermost/logr/v2/pkg.go
new file mode 100644
index 00000000..873b2e95
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/v2/pkg.go
@@ -0,0 +1,57 @@
+package logr
+
+import (
+	"runtime"
+	"strings"
+	"sync"
+)
+
+const (
+	maximumStackDepth int = 30
+)
+
+var (
+	logrPkg     string
+	pkgCalcOnce sync.Once
+)
+
+// GetLogrPackageName returns the root package name of Logr.
+func GetLogrPackageName() string {
+	pkgCalcOnce.Do(func() {
+		logrPkg = GetPackageName("GetLogrPackageName")
+	})
+	return logrPkg
+}
+
+// GetPackageName returns the package name of the caller.
+// `callingFuncName` should be the name of the calling function and
+// should be unique enough not to collide with any runtime methods.
+func GetPackageName(callingFuncName string) string { + var pkgName string + + pcs := make([]uintptr, maximumStackDepth) + _ = runtime.Callers(0, pcs) + + for _, pc := range pcs { + funcName := runtime.FuncForPC(pc).Name() + if strings.Contains(funcName, callingFuncName) { + pkgName = ResolvePackageName(funcName) + break + } + } + return pkgName +} + +// ResolvePackageName reduces a fully qualified function name to the package name +func ResolvePackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + return f +} diff --git a/vendor/github.com/mattermost/logr/v2/stdlogger.go b/vendor/github.com/mattermost/logr/v2/stdlogger.go new file mode 100644 index 00000000..50171b3d --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/stdlogger.go @@ -0,0 +1,56 @@ +package logr + +import ( + "log" + "os" + "strings" +) + +// NewStdLogger creates a standard logger backed by a Logr instance. +// All log records are emitted with the specified log level. +func NewStdLogger(level Level, logger Logger) *log.Logger { + adapter := newStdLogAdapter(logger, level) + return log.New(adapter, "", 0) +} + +// RedirectStdLog redirects output from the standard library's package-global logger +// to this logger at the specified level and with zero or more Field's. Since Logr already +// handles caller annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// A function is returned that restores the original prefix and flags and resets the standard +// library's output to os.Stderr. +func (lgr *Logr) RedirectStdLog(level Level, fields ...Field) func() { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + + logger := lgr.NewLogger().With(fields...) + adapter := newStdLogAdapter(logger, level) + log.SetOutput(adapter) + + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + } +} + +type stdLogAdapter struct { + logger Logger + level Level +} + +func newStdLogAdapter(logger Logger, level Level) *stdLogAdapter { + return &stdLogAdapter{ + logger: logger, + level: level, + } +} + +// Write implements io.Writer +func (a *stdLogAdapter) Write(p []byte) (int, error) { + s := strings.TrimSpace(string(p)) + a.logger.Log(a.level, s) + return len(p), nil +} diff --git a/vendor/github.com/mattermost/logr/v2/sugar.go b/vendor/github.com/mattermost/logr/v2/sugar.go new file mode 100644 index 00000000..882f0fd5 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/sugar.go @@ -0,0 +1,197 @@ +package logr + +import ( + "fmt" +) + +// Sugar provides a less structured API for logging. +type Sugar struct { + logger Logger +} + +func (s Sugar) sugarLog(lvl Level, msg string, args ...interface{}) { + if s.logger.IsLevelEnabled(lvl) { + fields := make([]Field, 0, len(args)) + for _, arg := range args { + fields = append(fields, Any("", arg)) + } + s.logger.Log(lvl, msg, fields...) + } +} + +// Trace is a convenience method equivalent to `Log(TraceLevel, msg, args...)`. +func (s Sugar) Trace(msg string, args ...interface{}) { + s.sugarLog(Trace, msg, args...) +} + +// Debug is a convenience method equivalent to `Log(DebugLevel, msg, args...)`. +func (s Sugar) Debug(msg string, args ...interface{}) { + s.sugarLog(Debug, msg, args...) +} + +// Print ensures compatibility with std lib logger. 
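A usage sketch for the redirection helper above; it assumes an existing `*logr.Logr` named `lgr` and uses the package's `Any` field constructor (seen in the sugar code later in this patch):

import (
	"log"

	"github.com/mattermost/logr/v2"
)

func redirectStdLib(lgr *logr.Logr) {
	// Std lib prefix/flags are suppressed; the returned closure restores
	// them and points the std logger back at os.Stderr.
	restore := lgr.RedirectStdLog(logr.Info, logr.Any("src", "stdlib"))
	defer restore()

	log.Println("this record now flows through Logr targets")
}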
+func (s Sugar) Print(msg string, args ...interface{}) { + s.Info(msg, args...) +} + +// Info is a convenience method equivalent to `Log(InfoLevel, msg, args...)`. +func (s Sugar) Info(msg string, args ...interface{}) { + s.sugarLog(Info, msg, args...) +} + +// Warn is a convenience method equivalent to `Log(WarnLevel, msg, args...)`. +func (s Sugar) Warn(msg string, args ...interface{}) { + s.sugarLog(Warn, msg, args...) +} + +// Error is a convenience method equivalent to `Log(ErrorLevel, msg, args...)`. +func (s Sugar) Error(msg string, args ...interface{}) { + s.sugarLog(Error, msg, args...) +} + +// Fatal is a convenience method equivalent to `Log(FatalLevel, msg, args...)` +func (s Sugar) Fatal(msg string, args ...interface{}) { + s.sugarLog(Fatal, msg, args...) +} + +// Panic is a convenience method equivalent to `Log(PanicLevel, msg, args...)` +func (s Sugar) Panic(msg string, args ...interface{}) { + s.sugarLog(Panic, msg, args...) +} + +// +// Printf style +// + +// Logf checks that the level matches one or more targets, and +// if so, generates a log record that is added to the main +// queue (channel). Arguments are handled in the manner of fmt.Printf. +func (s Sugar) Logf(lvl Level, format string, args ...interface{}) { + if s.logger.IsLevelEnabled(lvl) { + var msg string + if format == "" { + msg = fmt.Sprint(args...) + } else { + msg = fmt.Sprintf(format, args...) + } + s.logger.Log(lvl, msg) + } +} + +// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`. +func (s Sugar) Tracef(format string, args ...interface{}) { + s.Logf(Trace, format, args...) +} + +// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`. +func (s Sugar) Debugf(format string, args ...interface{}) { + s.Logf(Debug, format, args...) +} + +// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`. +func (s Sugar) Infof(format string, args ...interface{}) { + s.Logf(Info, format, args...) +} + +// Printf ensures compatibility with std lib logger. +func (s Sugar) Printf(format string, args ...interface{}) { + s.Infof(format, args...) +} + +// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`. +func (s Sugar) Warnf(format string, args ...interface{}) { + s.Logf(Warn, format, args...) +} + +// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`. +func (s Sugar) Errorf(format string, args ...interface{}) { + s.Logf(Error, format, args...) +} + +// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)` +func (s Sugar) Fatalf(format string, args ...interface{}) { + s.Logf(Fatal, format, args...) +} + +// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)` +func (s Sugar) Panicf(format string, args ...interface{}) { + s.Logf(Panic, format, args...) +} + +// +// K/V style +// + +// With returns a new Sugar logger with the specified key/value pairs added to the +// fields list. +func (s Sugar) With(keyValuePairs ...interface{}) Sugar { + return s.logger.With(s.argsToFields(keyValuePairs)...).Sugar() +} + +// Tracew outputs at trace level with the specified key/value pairs converted to fields. +func (s Sugar) Tracew(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Trace, msg, s.argsToFields(keyValuePairs)...) +} + +// Debugw outputs at debug level with the specified key/value pairs converted to fields. +func (s Sugar) Debugw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Debug, msg, s.argsToFields(keyValuePairs)...) 
+} + +// Infow outputs at info level with the specified key/value pairs converted to fields. +func (s Sugar) Infow(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Info, msg, s.argsToFields(keyValuePairs)...) +} + +// Warnw outputs at warn level with the specified key/value pairs converted to fields. +func (s Sugar) Warnw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Warn, msg, s.argsToFields(keyValuePairs)...) +} + +// Errorw outputs at error level with the specified key/value pairs converted to fields. +func (s Sugar) Errorw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Error, msg, s.argsToFields(keyValuePairs)...) +} + +// Fatalw outputs at fatal level with the specified key/value pairs converted to fields. +func (s Sugar) Fatalw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Fatal, msg, s.argsToFields(keyValuePairs)...) +} + +// Panicw outputs at panic level with the specified key/value pairs converted to fields. +func (s Sugar) Panicw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Panic, msg, s.argsToFields(keyValuePairs)...) +} + +// argsToFields converts an array of args, possibly containing name/value pairs +// into a []Field. +func (s Sugar) argsToFields(keyValuePairs []interface{}) []Field { + if len(keyValuePairs) == 0 { + return nil + } + + fields := make([]Field, 0, len(keyValuePairs)) + count := len(keyValuePairs) + + for i := 0; i < count; { + if fld, ok := keyValuePairs[i].(Field); ok { + fields = append(fields, fld) + i++ + continue + } + + if i == count-1 { + s.logger.Error("invalid key/value pair", Any("arg", keyValuePairs[i])) + break + } + + // we should have a key/value pair now. The key must be a string. + if key, ok := keyValuePairs[i].(string); !ok { + s.logger.Error("invalid key for key/value pair", Int("pos", i)) + } else { + fields = append(fields, Any(key, keyValuePairs[i+1])) + } + i += 2 + } + return fields +} diff --git a/vendor/github.com/mattermost/logr/v2/target.go b/vendor/github.com/mattermost/logr/v2/target.go new file mode 100644 index 00000000..fa0a9320 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/target.go @@ -0,0 +1,304 @@ +package logr + +import ( + "context" + "errors" + "fmt" + "os" + "sync/atomic" + "time" +) + +// Target represents a destination for log records such as file, +// database, TCP socket, etc. +type Target interface { + // Init is called once to initialize the target. + Init() error + + // Write outputs to this target's destination. + Write(p []byte, rec *LogRec) (int, error) + + // Shutdown is called once to free/close any resources. + // Target queue is already drained when this is called. + Shutdown() error +} + +type targetMetrics struct { + queueSizeGauge Gauge + loggedCounter Counter + errorCounter Counter + droppedCounter Counter + blockedCounter Counter +} + +type targetHostOptions struct { + name string + filter Filter + formatter Formatter + maxQueueSize int + metrics *metrics +} + +// TargetHost hosts and manages the lifecycle of a target. +// Incoming log records are queued and formatted before +// being passed to the target. 
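A brief sketch of both sugared styles; the `Sugar()` accessor on `Logger` is implied by the `With` method above, and `reqCount`/`elapsed` are hypothetical values:

import (
	"time"

	"github.com/mattermost/logr/v2"
)

func sugarExample(logger logr.Logger, reqCount int, elapsed time.Duration) {
	sugar := logger.Sugar()

	// Printf style: formatted message, no structured fields.
	sugar.Infof("served %d requests in %v", reqCount, elapsed)

	// Key/value style: pairs are converted to Fields by argsToFields above.
	sugar.Infow("request served", "count", reqCount, "elapsed", elapsed)
}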
+type TargetHost struct { + target Target + name string + + filter Filter + formatter Formatter + + in chan *LogRec + quit chan struct{} // closed by Shutdown to exit read loop + done chan struct{} // closed when read loop exited + targetMetrics *targetMetrics + + shutdown int32 +} + +func newTargetHost(target Target, options targetHostOptions) (*TargetHost, error) { + host := &TargetHost{ + target: target, + name: options.name, + filter: options.filter, + formatter: options.formatter, + in: make(chan *LogRec, options.maxQueueSize), + quit: make(chan struct{}), + done: make(chan struct{}), + } + + if host.name == "" { + host.name = fmt.Sprintf("%T", target) + } + + if host.filter == nil { + host.filter = &StdFilter{Lvl: Fatal} + } + if host.formatter == nil { + host.formatter = &DefaultFormatter{} + } + + err := host.initMetrics(options.metrics) + if err != nil { + return nil, err + } + + err = target.Init() + if err != nil { + return nil, err + } + + go host.start() + + return host, nil +} + +func (h *TargetHost) initMetrics(metrics *metrics) error { + if metrics == nil { + return nil + } + + var err error + tmetrics := &targetMetrics{} + + if tmetrics.queueSizeGauge, err = metrics.collector.QueueSizeGauge(h.name); err != nil { + return err + } + if tmetrics.loggedCounter, err = metrics.collector.LoggedCounter(h.name); err != nil { + return err + } + if tmetrics.errorCounter, err = metrics.collector.ErrorCounter(h.name); err != nil { + return err + } + if tmetrics.droppedCounter, err = metrics.collector.DroppedCounter(h.name); err != nil { + return err + } + if tmetrics.blockedCounter, err = metrics.collector.BlockedCounter(h.name); err != nil { + return err + } + h.targetMetrics = tmetrics + + updateFreqMillis := metrics.updateFreqMillis + if updateFreqMillis == 0 { + updateFreqMillis = DefMetricsUpdateFreqMillis + } + if updateFreqMillis < 250 { + updateFreqMillis = 250 // don't peg the CPU + } + + go h.startMetricsUpdater(updateFreqMillis) + return nil +} + +// IsLevelEnabled returns true if this target should emit logs for the specified level. +func (h *TargetHost) IsLevelEnabled(lvl Level) (enabled bool, level Level) { + level, enabled = h.filter.GetEnabledLevel(lvl) + return enabled, level +} + +// Shutdown stops processing log records after making best +// effort to flush queue. +func (h *TargetHost) Shutdown(ctx context.Context) error { + if atomic.SwapInt32(&h.shutdown, 1) != 0 { + return errors.New("targetHost shutdown called more than once") + } + + close(h.quit) + + // No more records can be accepted; now wait for read loop to exit. + select { + case <-ctx.Done(): + case <-h.done: + } + + // b.in channel should now be drained. + return h.target.Shutdown() +} + +// Log queues a log record to be output to this target's destination. 
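Because TargetHost drives any implementation of the three-method `Target` interface above, a custom target stays small. A minimal sketch that writes the already-formatted bytes to stderr:

import (
	"os"

	"github.com/mattermost/logr/v2"
)

// stderrTarget needs no setup or teardown; Write receives bytes that have
// already passed through the host's Formatter.
type stderrTarget struct{}

func (t *stderrTarget) Init() error { return nil }

func (t *stderrTarget) Write(p []byte, rec *logr.LogRec) (int, error) {
	return os.Stderr.Write(p)
}

func (t *stderrTarget) Shutdown() error { return nil }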
+func (h *TargetHost) Log(rec *LogRec) { + if atomic.LoadInt32(&h.shutdown) != 0 { + return + } + + lgr := rec.Logger().Logr() + select { + case h.in <- rec: + default: + handler := lgr.options.onTargetQueueFull + if handler != nil && handler(h.target, rec, cap(h.in)) { + h.incDroppedCounter() + return // drop the record + } + h.incBlockedCounter() + + select { + case <-time.After(lgr.options.enqueueTimeout): + lgr.ReportError(fmt.Errorf("target enqueue timeout for log rec [%v]", rec)) + case h.in <- rec: // block until success or timeout + } + } +} + +func (h *TargetHost) setQueueSizeGauge(val float64) { + if h.targetMetrics != nil { + h.targetMetrics.queueSizeGauge.Set(val) + } +} + +func (h *TargetHost) incLoggedCounter() { + if h.targetMetrics != nil { + h.targetMetrics.loggedCounter.Inc() + } +} + +func (h *TargetHost) incErrorCounter() { + if h.targetMetrics != nil { + h.targetMetrics.errorCounter.Inc() + } +} + +func (h *TargetHost) incDroppedCounter() { + if h.targetMetrics != nil { + h.targetMetrics.droppedCounter.Inc() + } +} + +func (h *TargetHost) incBlockedCounter() { + if h.targetMetrics != nil { + h.targetMetrics.blockedCounter.Inc() + } +} + +// String returns a name for this target. +func (h *TargetHost) String() string { + return h.name +} + +// start accepts log records via In channel and writes to the +// supplied target, until Done channel signaled. +func (h *TargetHost) start() { + defer func() { + if r := recover(); r != nil { + fmt.Fprintln(os.Stderr, "TargetHost.start -- ", r) + go h.start() + } else { + close(h.done) + } + }() + + for { + var rec *LogRec + select { + case rec = <-h.in: + if rec.flush != nil { + h.flush(rec.flush) + } else { + err := h.writeRec(rec) + if err != nil { + h.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } else { + h.incLoggedCounter() + } + } + case <-h.quit: + return + } + } +} + +func (h *TargetHost) writeRec(rec *LogRec) error { + level, enabled := h.filter.GetEnabledLevel(rec.Level()) + if !enabled { + // how did we get here? + return fmt.Errorf("level %s not enabled for target %s", rec.Level().Name, h.name) + } + + buf := rec.logger.lgr.BorrowBuffer() + defer rec.logger.lgr.ReleaseBuffer(buf) + + buf, err := h.formatter.Format(rec, level, buf) + if err != nil { + return err + } + + _, err = h.target.Write(buf.Bytes(), rec) + return err +} + +// startMetricsUpdater updates the metrics for any polled values every `updateFreqMillis` seconds until +// target is shut down. +func (h *TargetHost) startMetricsUpdater(updateFreqMillis int64) { + for { + select { + case <-h.done: + return + case <-time.After(time.Duration(updateFreqMillis) * time.Millisecond): + h.setQueueSizeGauge(float64(len(h.in))) + } + } +} + +// flush drains the queue and notifies when done. +func (h *TargetHost) flush(done chan<- struct{}) { + for { + var rec *LogRec + var err error + select { + case rec = <-h.in: + // ignore any redundant flush records. 
+ if rec.flush == nil { + err = h.writeRec(rec) + if err != nil { + h.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } + } + default: + done <- struct{}{} + return + } + } +} diff --git a/vendor/github.com/mattermost/logr/target/file.go b/vendor/github.com/mattermost/logr/v2/targets/file.go similarity index 57% rename from vendor/github.com/mattermost/logr/target/file.go rename to vendor/github.com/mattermost/logr/v2/targets/file.go index bc0bcd17..71133fac 100644 --- a/vendor/github.com/mattermost/logr/target/file.go +++ b/vendor/github.com/mattermost/logr/v2/targets/file.go @@ -1,11 +1,10 @@ -package target +package targets import ( - "context" + "errors" "io" - "github.com/mattermost/logr" - "github.com/wiggin77/merror" + "github.com/mattermost/logr/v2" "gopkg.in/natefinch/lumberjack.v2" ) @@ -13,38 +12,44 @@ type FileOptions struct { // Filename is the file to write logs to. Backup log files will be retained // in the same directory. It uses -lumberjack.log in // os.TempDir() if empty. - Filename string + Filename string `json:"filename"` // MaxSize is the maximum size in megabytes of the log file before it gets // rotated. It defaults to 100 megabytes. - MaxSize int + MaxSize int `json:"max_size"` // MaxAge is the maximum number of days to retain old log files based on the // timestamp encoded in their filename. Note that a day is defined as 24 // hours and may not exactly correspond to calendar days due to daylight // savings, leap seconds, etc. The default is not to remove old log files // based on age. - MaxAge int + MaxAge int `json:"max_age"` // MaxBackups is the maximum number of old log files to retain. The default // is to retain all old log files (though MaxAge may still cause them to get // deleted.) - MaxBackups int + MaxBackups int `json:"max_backups"` // Compress determines if the rotated log files should be compressed // using gzip. The default is not to perform compression. - Compress bool + Compress bool `json:"compress"` +} + +func (fo FileOptions) CheckValid() error { + if fo.Filename == "" { + return errors.New("filename cannot be empty") + } + return nil } // File outputs log records to a file which can be log rotated based on size or age. // Uses `https://github.com/natefinch/lumberjack` for rotation. type File struct { - logr.Basic out io.WriteCloser } // NewFileTarget creates a target capable of outputting log records to a rotated file. -func NewFileTarget(filter logr.Filter, formatter logr.Formatter, opts FileOptions, maxQueue int) *File { +func NewFileTarget(opts FileOptions) *File { lumber := &lumberjack.Logger{ Filename: opts.Filename, MaxSize: opts.MaxSize, @@ -53,40 +58,21 @@ func NewFileTarget(filter logr.Filter, formatter logr.Formatter, opts FileOption Compress: opts.Compress, } f := &File{out: lumber} - f.Basic.Start(f, f, filter, formatter, maxQueue) return f } -// Write converts the log record to bytes, via the Formatter, -// and outputs to a file. -func (f *File) Write(rec *logr.LogRec) error { - _, stacktrace := f.IsLevelEnabled(rec.Level()) - - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := f.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - _, err = f.out.Write(buf.Bytes()) - return err +// Init is called once to initialize the target. +func (f *File) Init() error { + return nil } -// Shutdown flushes any remaining log records and closes the file. 
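With the rewrite above, the file target is constructed from options alone, and filtering/formatting move to target registration. A sketch of the new wiring, using the `AddTarget` signature and `Plain` formatter that appear in the testing helper later in this patch; the path is hypothetical:

import (
	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/formatters"
	"github.com/mattermost/logr/v2/targets"
)

func addFileTarget(lgr *logr.Logr) error {
	t := targets.NewFileTarget(targets.FileOptions{
		Filename:   "/var/log/matterwick.log", // hypothetical path
		MaxSize:    100,                       // megabytes before rotation
		MaxBackups: 5,
		Compress:   true,
	})
	filter := &logr.StdFilter{Lvl: logr.Info}
	formatter := &formatters.Plain{EnableCaller: true}
	return lgr.AddTarget(t, "file", filter, formatter, 1000)
}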
-func (f *File) Shutdown(ctx context.Context) error { - errs := merror.New() - - err := f.Basic.Shutdown(ctx) - errs.Append(err) - - err = f.out.Close() - errs.Append(err) - - return errs.ErrorOrNil() +// Write outputs bytes to this file target. +func (f *File) Write(p []byte, rec *logr.LogRec) (int, error) { + return f.out.Write(p) } -// String returns a string representation of this target. -func (f *File) String() string { - return "FileTarget" +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. +func (f *File) Shutdown() error { + return f.out.Close() } diff --git a/vendor/github.com/mattermost/logr/v2/targets/syslog.go b/vendor/github.com/mattermost/logr/v2/targets/syslog.go new file mode 100644 index 00000000..fc3fcc5f --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/syslog.go @@ -0,0 +1,112 @@ +// +build !windows,!nacl,!plan9 + +package targets + +import ( + "crypto/tls" + "errors" + "fmt" + + "github.com/mattermost/logr/v2" + syslog "github.com/wiggin77/srslog" +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + params *SyslogOptions + writer *syslog.Writer +} + +// SyslogOptions provides parameters for dialing a syslog daemon. +type SyslogOptions struct { + IP string `json:"ip,omitempty"` // deprecated + Host string `json:"host"` + Port int `json:"port"` + TLS bool `json:"tls"` + Cert string `json:"cert"` + Insecure bool `json:"insecure"` + Tag string `json:"tag"` +} + +func (so SyslogOptions) CheckValid() error { + if so.Host == "" && so.IP == "" { + return errors.New("missing host") + } + if so.Port == 0 { + return errors.New("missing port") + } + return nil +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS. +func NewSyslogTarget(params *SyslogOptions) (*Syslog, error) { + if params == nil { + return nil, errors.New("params cannot be nil") + } + + s := &Syslog{ + params: params, + } + return s, nil +} + +// Init is called once to initialize the target. +func (s *Syslog) Init() error { + network := "tcp" + var config *tls.Config + + if s.params.TLS { + network = "tcp+tls" + config = &tls.Config{InsecureSkipVerify: s.params.Insecure} + if s.params.Cert != "" { + pool, err := GetCertPool(s.params.Cert) + if err != nil { + return err + } + config.RootCAs = pool + } + } + raddr := fmt.Sprintf("%s:%d", s.params.IP, s.params.Port) + if raddr == ":0" { + // If no IP:port provided then connect to local syslog. + raddr = "" + network = "" + } + + var err error + s.writer, err = syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, s.params.Tag, config) + return err +} + +// Write outputs bytes to this file target. +func (s *Syslog) Write(p []byte, rec *logr.LogRec) (int, error) { + txt := string(p) + n := len(txt) + var err error + + switch rec.Level() { + case logr.Panic, logr.Fatal: + err = s.writer.Crit(txt) + case logr.Error: + err = s.writer.Err(txt) + case logr.Warn: + err = s.writer.Warning(txt) + case logr.Debug, logr.Trace: + err = s.writer.Debug(txt) + default: + // logr.Info plus all custom levels. + err = s.writer.Info(txt) + } + + if err != nil { + n = 0 + // syslog writer will try to reconnect. + } + return n, err +} + +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. 
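A usage sketch for the syslog target. Note that `Init` builds the dial address from the deprecated `IP` field rather than `Host`, so the sketch sets `IP`; leaving both `IP` and `Port` zero falls back to the local syslog daemon:

import (
	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/formatters"
	"github.com/mattermost/logr/v2/targets"
)

func addSyslogTarget(lgr *logr.Logr) error {
	st, err := targets.NewSyslogTarget(&targets.SyslogOptions{
		IP:   "10.10.0.5", // hypothetical collector; Init dials IP:Port
		Port: 514,
		Tag:  "matterwick",
	})
	if err != nil {
		return err
	}
	// The target host calls Init, which performs the actual dial.
	return lgr.AddTarget(st, "syslog", &logr.StdFilter{Lvl: logr.Warn}, &formatters.Plain{}, 1000)
}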
+func (s *Syslog) Shutdown() error { + return s.writer.Close() +} diff --git a/vendor/github.com/mattermost/logr/v2/targets/syslog_unsupported.go b/vendor/github.com/mattermost/logr/v2/targets/syslog_unsupported.go new file mode 100644 index 00000000..e4086e96 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/syslog_unsupported.go @@ -0,0 +1,56 @@ +// +build windows nacl plan9 + +package targets + +import ( + "errors" + + "github.com/mattermost/logr/v2" + syslog "github.com/wiggin77/srslog" +) + +const ( + unsupported = "Syslog target is not supported on this platform." +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + params *SyslogOptions + writer *syslog.Writer +} + +// SyslogOptions provides parameters for dialing a syslog daemon. +type SyslogOptions struct { + IP string `json:"ip,omitempty"` // deprecated + Host string `json:"host"` + Port int `json:"port"` + TLS bool `json:"tls"` + Cert string `json:"cert"` + Insecure bool `json:"insecure"` + Tag string `json:"tag"` +} + +func (so SyslogOptions) CheckValid() error { + return errors.New(unsupported) +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS. +func NewSyslogTarget(params *SyslogOptions) (*Syslog, error) { + return nil, errors.New(unsupported) +} + +// Init is called once to initialize the target. +func (s *Syslog) Init() error { + return errors.New(unsupported) +} + +// Write outputs bytes to this file target. +func (s *Syslog) Write(p []byte, rec *logr.LogRec) (int, error) { + return 0, errors.New(unsupported) +} + +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. +func (s *Syslog) Shutdown() error { + return errors.New(unsupported) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go b/vendor/github.com/mattermost/logr/v2/targets/tcp.go similarity index 59% rename from vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go rename to vendor/github.com/mattermost/logr/v2/targets/tcp.go index dad20474..ce73e034 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go +++ b/vendor/github.com/mattermost/logr/v2/targets/tcp.go @@ -1,7 +1,7 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. -package mlog +package targets import ( "context" @@ -12,10 +12,7 @@ import ( "sync" "time" - "github.com/hashicorp/go-multierror" - "github.com/mattermost/logr" - - _ "net/http/pprof" + "github.com/mattermost/logr/v2" ) const ( @@ -27,10 +24,8 @@ const ( // Tcp outputs log records to raw socket server. type Tcp struct { - logr.Basic - - params *TcpParams - addy string + options *TcpOptions + addy string mutex sync.Mutex conn net.Conn @@ -38,39 +33,49 @@ type Tcp struct { shutdown chan struct{} } -// TcpParams provides parameters for dialing a socket server. -type TcpParams struct { - IP string `json:"IP"` - Port int `json:"Port"` - TLS bool `json:"TLS"` - Cert string `json:"Cert"` - Insecure bool `json:"Insecure"` +// TcpOptions provides parameters for dialing a socket server. 
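The TCP target follows the same pattern; with `TLS` enabled, `Cert` may be a PEM file path or a base64-encoded certificate, resolved through `GetCertPool` later in this patch. As with syslog, the dial address comes from the deprecated `IP` field:

import (
	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/formatters"
	"github.com/mattermost/logr/v2/targets"
)

func addTCPTarget(lgr *logr.Logr) error {
	t := targets.NewTcpTarget(&targets.TcpOptions{
		IP:   "10.10.0.6", // hypothetical sink; dial and TLS ServerName use IP
		Port: 6514,
		TLS:  true,
		Cert: "/etc/ssl/logsink.pem", // hypothetical PEM path (or base64 cert)
	})
	return lgr.AddTarget(t, "tcp", &logr.StdFilter{Lvl: logr.Error}, &formatters.Plain{}, 1000)
}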
+type TcpOptions struct { + IP string `json:"ip,omitempty"` // deprecated + Host string `json:"host"` + Port int `json:"port"` + TLS bool `json:"tls"` + Cert string `json:"cert"` + Insecure bool `json:"insecure"` +} + +func (to TcpOptions) CheckValid() error { + if to.Host == "" && to.IP == "" { + return errors.New("missing host") + } + if to.Port == 0 { + return errors.New("missing port") + } + return nil } // NewTcpTarget creates a target capable of outputting log records to a raw socket, with or without TLS. -func NewTcpTarget(filter logr.Filter, formatter logr.Formatter, params *TcpParams, maxQueue int) (*Tcp, error) { +func NewTcpTarget(options *TcpOptions) *Tcp { tcp := &Tcp{ - params: params, - addy: fmt.Sprintf("%s:%d", params.IP, params.Port), + options: options, + addy: fmt.Sprintf("%s:%d", options.IP, options.Port), monitor: make(chan struct{}), shutdown: make(chan struct{}), } - tcp.Basic.Start(tcp, tcp, filter, formatter, maxQueue) + return tcp +} - return tcp, nil +// Init is called once to initialize the target. +func (tcp *Tcp) Init() error { + return nil } // getConn provides a net.Conn. If a connection already exists, it is returned immediately, // otherwise this method blocks until a new connection is created, timeout or shutdown. -func (tcp *Tcp) getConn() (net.Conn, error) { +func (tcp *Tcp) getConn(reporter func(err interface{})) (net.Conn, error) { tcp.mutex.Lock() defer tcp.mutex.Unlock() - Log(LvlTcpLogTarget, "getConn enter", String("addy", tcp.addy)) - defer Log(LvlTcpLogTarget, "getConn exit", String("addy", tcp.addy)) - if tcp.conn != nil { - Log(LvlTcpLogTarget, "reusing existing conn", String("addy", tcp.addy)) // use "With" once Zap is removed return tcp.conn, nil } @@ -84,14 +89,15 @@ func (tcp *Tcp) getConn() (net.Conn, error) { defer cancel() go func(ctx context.Context, ch chan result) { - Log(LvlTcpLogTarget, "dailing", String("addy", tcp.addy)) conn, err := tcp.dial(ctx) - if err == nil { - tcp.conn = conn - tcp.monitor = make(chan struct{}) - go monitor(tcp.conn, tcp.monitor) + if err != nil { + reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err)) + return } - connChan <- result{conn: conn, err: err} + tcp.conn = conn + tcp.monitor = make(chan struct{}) + go monitor(tcp.conn, tcp.monitor) + ch <- result{conn: conn, err: err} }(ctx, connChan) select { @@ -107,23 +113,21 @@ func (tcp *Tcp) getConn() (net.Conn, error) { func (tcp *Tcp) dial(ctx context.Context) (net.Conn, error) { var dialer net.Dialer dialer.Timeout = time.Second * DialTimeoutSecs - conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.params.IP, tcp.params.Port)) + conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.options.IP, tcp.options.Port)) if err != nil { return nil, err } - if !tcp.params.TLS { + if !tcp.options.TLS { return conn, nil } - Log(LvlTcpLogTarget, "TLS handshake", String("addy", tcp.addy)) - tlsconfig := &tls.Config{ - ServerName: tcp.params.IP, - InsecureSkipVerify: tcp.params.Insecure, + ServerName: tcp.options.IP, + InsecureSkipVerify: tcp.options.Insecure, } - if tcp.params.Cert != "" { - pool, err := getCertPool(tcp.params.Cert) + if tcp.options.Cert != "" { + pool, err := GetCertPool(tcp.options.Cert) if err != nil { return nil, err } @@ -143,7 +147,6 @@ func (tcp *Tcp) close() error { var err error if tcp.conn != nil { - Log(LvlTcpLogTarget, "closing connection", String("addy", tcp.addy)) close(tcp.monitor) err = tcp.conn.Close() tcp.conn = nil @@ -152,69 +155,49 @@ func (tcp *Tcp) close() error { } // 
Shutdown stops processing log records after making best effort to flush queue. -func (tcp *Tcp) Shutdown(ctx context.Context) error { - errs := &multierror.Error{} - - Log(LvlTcpLogTarget, "shutting down", String("addy", tcp.addy)) - - if err := tcp.Basic.Shutdown(ctx); err != nil { - errs = multierror.Append(errs, err) - } - - if err := tcp.close(); err != nil { - errs = multierror.Append(errs, err) - } - +func (tcp *Tcp) Shutdown() error { + err := tcp.close() close(tcp.shutdown) - return errs.ErrorOrNil() + return err } // Write converts the log record to bytes, via the Formatter, and outputs to the socket. // Called by dedicated target goroutine and will block until success or shutdown. -func (tcp *Tcp) Write(rec *logr.LogRec) error { - _, stacktrace := tcp.IsLevelEnabled(rec.Level()) - - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := tcp.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - +func (tcp *Tcp) Write(p []byte, rec *logr.LogRec) (int, error) { try := 1 backoff := RetryBackoffMillis for { select { case <-tcp.shutdown: - return err + return 0, nil default: } - conn, err := tcp.getConn() + reporter := rec.Logger().Logr().ReportError + + conn, err := tcp.getConn(reporter) if err != nil { - Log(LvlTcpLogTarget, "failed getting connection", String("addy", tcp.addy), Err(err)) - reporter := rec.Logger().Logr().ReportError reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err)) backoff = tcp.sleep(backoff) continue } - conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs)) - _, err = buf.WriteTo(conn) + err = conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs)) + if err != nil { + reporter(fmt.Errorf("log target %s set write deadline error: %w", tcp.String(), err)) + } + + count, err := conn.Write(p) if err == nil { - return nil + return count, nil } - Log(LvlTcpLogTarget, "write error", String("addy", tcp.addy), Err(err)) - reporter := rec.Logger().Logr().ReportError reporter(fmt.Errorf("log target %s write error: %w", tcp.String(), err)) _ = tcp.close() backoff = tcp.sleep(backoff) try++ - Log(LvlTcpLogTarget, "retrying write", String("addy", tcp.addy), Int("try", try)) } } @@ -223,13 +206,8 @@ func (tcp *Tcp) Write(rec *logr.LogRec) error { // take a long time to detect a loss of connectivity on a socket when only writing; // the writes simply fail without an error returned. func monitor(conn net.Conn, done <-chan struct{}) { - addy := conn.RemoteAddr().String() - defer Log(LvlTcpLogTarget, "monitor exiting", String("addy", addy)) - buf := make([]byte, 1) for { - Log(LvlTcpLogTarget, "monitor loop", String("addy", addy)) - select { case <-done: return @@ -249,7 +227,6 @@ func monitor(conn net.Conn, done <-chan struct{}) { } // Any other error closes the connection, forcing a reconnect. - Log(LvlTcpLogTarget, "monitor closing connection", Err(err)) conn.Close() return } @@ -257,7 +234,7 @@ func monitor(conn net.Conn, done <-chan struct{}) { // String returns a string representation of this target. 
func (tcp *Tcp) String() string { - return fmt.Sprintf("TcpTarget[%s:%d]", tcp.params.IP, tcp.params.Port) + return fmt.Sprintf("TcpTarget[%s:%d]", tcp.options.IP, tcp.options.Port) } func (tcp *Tcp) sleep(backoff int64) int64 { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem b/vendor/github.com/mattermost/logr/v2/targets/test-tls-client-cert.pem similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem rename to vendor/github.com/mattermost/logr/v2/targets/test-tls-client-cert.pem diff --git a/vendor/github.com/mattermost/logr/v2/targets/testing.go b/vendor/github.com/mattermost/logr/v2/targets/testing.go new file mode 100644 index 00000000..ea3df70c --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/testing.go @@ -0,0 +1,72 @@ +package targets + +import ( + "strings" + "sync" + "testing" + + "github.com/mattermost/logr/v2" + "github.com/mattermost/logr/v2/formatters" +) + +// Testing is a simple log target that writes to a (*testing.T) log. +type Testing struct { + mux sync.Mutex + t *testing.T +} + +func NewTestingTarget(t *testing.T) *Testing { + return &Testing{ + t: t, + } +} + +// Init is called once to initialize the target. +func (tt *Testing) Init() error { + return nil +} + +// Write outputs bytes to this file target. +func (tt *Testing) Write(p []byte, rec *logr.LogRec) (int, error) { + tt.mux.Lock() + defer tt.mux.Unlock() + + if tt.t != nil { + s := strings.TrimSpace(string(p)) + tt.t.Log(s) + } + return len(p), nil +} + +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. +func (tt *Testing) Shutdown() error { + tt.mux.Lock() + defer tt.mux.Unlock() + + tt.t = nil + return nil +} + +// CreateTestLogger creates a logger for unit tests. Log records are output to `(*testing.T).Log`. +// A new logger is returned along with a method to shutdown the new logger. +func CreateTestLogger(t *testing.T, levels ...logr.Level) (logger logr.Logger, shutdown func() error) { + lgr, _ := logr.New() + filter := logr.NewCustomFilter(levels...) + formatter := &formatters.Plain{EnableCaller: true} + target := NewTestingTarget(t) + + if err := lgr.AddTarget(target, "test", filter, formatter, 1000); err != nil { + t.Fail() + } + shutdown = func() error { + err := lgr.Shutdown() + if err != nil { + target.mux.Lock() + target.t.Error("error shutting down test logger", err) + target.mux.Unlock() + } + return err + } + return lgr.NewLogger(), shutdown +} diff --git a/vendor/github.com/mattermost/logr/v2/targets/utils.go b/vendor/github.com/mattermost/logr/v2/targets/utils.go new file mode 100644 index 00000000..6e605af2 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/utils.go @@ -0,0 +1,33 @@ +package targets + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "io/ioutil" +) + +// GetCertPool returns a x509.CertPool containing the cert(s) +// from `cert`, which can be a path to a .pem or .crt file, +// or a base64 encoded cert. +func GetCertPool(cert string) (*x509.CertPool, error) { + if cert == "" { + return nil, errors.New("no cert provided") + } + + // first treat as a file and try to read. 
+ serverCert, err := ioutil.ReadFile(cert) + if err != nil { + // maybe it's a base64 encoded cert + serverCert, err = base64.StdEncoding.DecodeString(cert) + if err != nil { + return nil, errors.New("cert cannot be read") + } + } + + pool := x509.NewCertPool() + if ok := pool.AppendCertsFromPEM(serverCert); ok { + return pool, nil + } + return nil, errors.New("cannot parse cert") +} diff --git a/vendor/github.com/mattermost/logr/v2/targets/writer.go b/vendor/github.com/mattermost/logr/v2/targets/writer.go new file mode 100644 index 00000000..d9f64d76 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/writer.go @@ -0,0 +1,38 @@ +package targets + +import ( + "io" + "io/ioutil" + + "github.com/mattermost/logr/v2" +) + +// Writer outputs log records to any `io.Writer`. +type Writer struct { + out io.Writer +} + +// NewWriterTarget creates a target capable of outputting log records to an io.Writer. +func NewWriterTarget(out io.Writer) *Writer { + if out == nil { + out = ioutil.Discard + } + w := &Writer{out: out} + return w +} + +// Init is called once to initialize the target. +func (w *Writer) Init() error { + return nil +} + +// Write outputs bytes to this file target. +func (w *Writer) Write(p []byte, rec *logr.LogRec) (int, error) { + return w.out.Write(p) +} + +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. +func (w *Writer) Shutdown() error { + return nil +} diff --git a/vendor/github.com/mattermost/logr/timeout.go b/vendor/github.com/mattermost/logr/v2/timeout.go similarity index 100% rename from vendor/github.com/mattermost/logr/timeout.go rename to vendor/github.com/mattermost/logr/v2/timeout.go diff --git a/vendor/github.com/mattermost/mattermost-cloud/k8s/client.go b/vendor/github.com/mattermost/mattermost-cloud/k8s/client.go index c521f5ca..27478715 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/k8s/client.go +++ b/vendor/github.com/mattermost/mattermost-cloud/k8s/client.go @@ -95,10 +95,6 @@ func createKubeClient(config *rest.Config, logger log.FieldLogger) (*KubeClient, nil } -func (kc *KubeClient) getKubeConfigClientset() (*kubernetes.Clientset, error) { - return kubernetes.NewForConfig(kc.config) -} - // GetConfig exposes the rest.Config for use with other k8s packages. func (kc *KubeClient) GetConfig() *rest.Config { return kc.config diff --git a/vendor/github.com/mattermost/mattermost-cloud/k8s/manifest.go b/vendor/github.com/mattermost/mattermost-cloud/k8s/manifest.go index 51d8fec6..5a76f8a8 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/k8s/manifest.go +++ b/vendor/github.com/mattermost/mattermost-cloud/k8s/manifest.go @@ -7,7 +7,7 @@ package k8s import ( "bytes" "fmt" - "io/ioutil" + "os" "path" "reflect" @@ -21,9 +21,11 @@ import ( appsv1beta2 "k8s.io/api/apps/v1beta2" apiv1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacbetav1 "k8s.io/api/rbac/v1beta1" + storagev1 "k8s.io/api/storage/v1" apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apixv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apixv1beta1scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" @@ -63,7 +65,7 @@ func (kc *KubeClient) CreateFromFiles(files []ManifestFile) error { // the provided file. An error is returned if any of the create actions failed. // This process equates to running `kubectl create -f FILENAME`. 
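The Testing target and `CreateTestLogger` above make Logr output land in `go test` logs; a hypothetical test using only calls shown in this patch:

import (
	"testing"

	"github.com/mattermost/logr/v2"
	"github.com/mattermost/logr/v2/targets"
)

func TestWebhookHandler(t *testing.T) {
	logger, shutdown := targets.CreateTestLogger(t, logr.Info, logr.Error)
	defer shutdown() // flushes and shuts down; failures surface via t.Error

	logger.Log(logr.Info, "emitted through (*testing.T).Log")
}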
func (kc *KubeClient) CreateFromFile(file ManifestFile, installationName string) error { - data, err := ioutil.ReadFile(file.Path) + data, err := os.ReadFile(file.Path) if err != nil { return err } @@ -159,6 +161,8 @@ func (kc *KubeClient) createFileResource(deployNamespace string, obj interface{} return kc.createOrUpdateDaemonSetV1(deployNamespace, obj.(*appsv1.DaemonSet)) case *policyv1beta1.PodDisruptionBudget: return kc.createOrUpdatePodDisruptionBudgetBetaV1(deployNamespace, obj.(*policyv1beta1.PodDisruptionBudget)) + case *policyv1.PodDisruptionBudget: + return kc.createOrUpdatePodDisruptionBudgetV1(deployNamespace, obj.(*policyv1.PodDisruptionBudget)) case *networkingv1.NetworkPolicy: return kc.createOrUpdateNetworkPolicyV1(deployNamespace, obj.(*networkingv1.NetworkPolicy)) case *apiregistrationv1beta1.APIService: @@ -167,7 +171,9 @@ func (kc *KubeClient) createFileResource(deployNamespace string, obj interface{} return kc.createOrUpdatePersistentVolume(obj.(*apiv1.PersistentVolume)) case *apiv1.PersistentVolumeClaim: return kc.createOrUpdatePersistentVolumeClaim(deployNamespace, obj.(*apiv1.PersistentVolumeClaim)) + case *storagev1.StorageClass: + return kc.createOrUpdateStorageClass(obj.(*storagev1.StorageClass)) default: - return nil, fmt.Errorf("Error: unsupported k8s manifest type %T", o) + return nil, fmt.Errorf("error: unsupported k8s manifest type %T", o) } } diff --git a/vendor/github.com/mattermost/mattermost-cloud/k8s/poddisruptionbudget.go b/vendor/github.com/mattermost/mattermost-cloud/k8s/poddisruptionbudget.go index 0c1ac8e5..ee17ff27 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/k8s/poddisruptionbudget.go +++ b/vendor/github.com/mattermost/mattermost-cloud/k8s/poddisruptionbudget.go @@ -7,6 +7,7 @@ package k8s import ( "context" + v1 "k8s.io/api/policy/v1" v1beta1 "k8s.io/api/policy/v1beta1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,3 +26,19 @@ func (kc *KubeClient) createOrUpdatePodDisruptionBudgetBetaV1(namespace string, return kc.Clientset.PolicyV1beta1().PodDisruptionBudgets(namespace).Update(ctx, podDisruptionBudget, metav1.UpdateOptions{}) } + +func (kc *KubeClient) createOrUpdatePodDisruptionBudgetV1(namespace string, podDisruptionBudget *v1.PodDisruptionBudget) (metav1.Object, error) { + ctx := context.TODO() + pdb, err := kc.Clientset.PolicyV1().PodDisruptionBudgets(namespace).Get(ctx, podDisruptionBudget.GetName(), metav1.GetOptions{}) + if err != nil && !k8sErrors.IsNotFound(err) { + return nil, err + } + + if err != nil && k8sErrors.IsNotFound(err) { + return kc.Clientset.PolicyV1().PodDisruptionBudgets(namespace).Create(ctx, podDisruptionBudget, metav1.CreateOptions{}) + } + + // TODO: Fix update fail due to the error (metadata.resourceVersion: Invalid value: 0x0: must be specified for an update) + pdb.Spec = podDisruptionBudget.Spec + return kc.Clientset.PolicyV1().PodDisruptionBudgets(namespace).Update(ctx, pdb, metav1.UpdateOptions{}) +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/k8s/storage_class.go b/vendor/github.com/mattermost/mattermost-cloud/k8s/storage_class.go index 702ea158..89fae023 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/k8s/storage_class.go +++ b/vendor/github.com/mattermost/mattermost-cloud/k8s/storage_class.go @@ -7,6 +7,7 @@ package k8s import ( "context" + storagev1 "k8s.io/api/storage/v1" "k8s.io/api/storage/v1beta1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,3 +30,17 @@ 
func (kc *KubeClient) UpdateStorageClassVolumeBindingMode(class string) (metav1. } return kc.Clientset.StorageV1beta1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{}) } + +func (kc *KubeClient) createOrUpdateStorageClass(storage *storagev1.StorageClass) (metav1.Object, error) { + ctx := context.TODO() + _, err := kc.Clientset.StorageV1().StorageClasses().Get(ctx, storage.GetName(), metav1.GetOptions{}) + if err != nil && !k8sErrors.IsNotFound(err) { + return nil, err + } + + if err != nil && k8sErrors.IsNotFound(err) { + return kc.Clientset.StorageV1().StorageClasses().Create(ctx, storage, metav1.CreateOptions{}) + } + + return kc.Clientset.StorageV1().StorageClasses().Update(ctx, storage, metav1.UpdateOptions{}) +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/aws_types.go b/vendor/github.com/mattermost/mattermost-cloud/model/aws_types.go index f2f9234b..96724dae 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/aws_types.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/aws_types.go @@ -8,3 +8,11 @@ package model type Certificate struct { ARN *string } + +type LaunchTemplateData struct { + Name string + ClusterName string + AMI string + MaxPodsPerNode int64 + SecurityGroups []string +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/client.go b/vendor/github.com/mattermost/mattermost-cloud/model/client.go index a8233f9d..9d8b534e 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/client.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/client.go @@ -8,7 +8,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" @@ -44,7 +44,7 @@ func NewClientWithHeaders(address string, headers map[string]string) *Client { // closeBody ensures the Body of an http.Response is properly closed. 
func closeBody(r *http.Response) { if r.Body != nil { - _, _ = ioutil.ReadAll(r.Body) + _, _ = io.ReadAll(r.Body) _ = r.Body.Close() } } @@ -122,7 +122,7 @@ func (c *Client) CreateCluster(request *CreateClusterRequest) (*ClusterDTO, erro switch resp.StatusCode { case http.StatusAccepted: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -157,7 +157,7 @@ func (c *Client) ProvisionCluster(clusterID string, request *ProvisionClusterReq switch resp.StatusCode { case http.StatusAccepted: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -174,7 +174,7 @@ func (c *Client) GetCluster(clusterID string) (*ClusterDTO, error) { switch resp.StatusCode { case http.StatusOK: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) case http.StatusNotFound: return nil, nil @@ -201,7 +201,7 @@ func (c *Client) GetClusters(request *GetClustersRequest) ([]*ClusterDTO, error) switch resp.StatusCode { case http.StatusOK: - return ClusterDTOsFromReader(resp.Body) + return DTOsFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -235,7 +235,7 @@ func (c *Client) UpdateCluster(clusterID string, request *UpdateClusterRequest) switch resp.StatusCode { case http.StatusAccepted: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -252,7 +252,7 @@ func (c *Client) UpgradeCluster(clusterID string, request *PatchUpgradeClusterRe switch resp.StatusCode { case http.StatusAccepted: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -269,7 +269,24 @@ func (c *Client) ResizeCluster(clusterID string, request *PatchClusterSizeReques switch resp.StatusCode { case http.StatusAccepted: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) + + default: + return nil, errors.Errorf("failed with status code %d", resp.StatusCode) + } +} + +// CreateNodegroups requests the creation of new nodegroups in the given cluster. 
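The `DTOFromReader[ClusterDTO]` calls above replace the per-type readers this patch deletes (see the removed `ClusterDTOFromReader` further down). The generic helper itself is not part of this diff; a plausible shape, mirroring the deleted readers, would be:

import (
	"encoding/json"
	"io"
)

// DTOFromReader decodes a single JSON-encoded DTO of type T.
func DTOFromReader[T any](reader io.Reader) (*T, error) {
	dto := new(T)
	if err := json.NewDecoder(reader).Decode(dto); err != nil && err != io.EOF {
		return nil, err
	}
	return dto, nil
}

// DTOsFromReader decodes a JSON-encoded list of DTOs of type T.
func DTOsFromReader[T any](reader io.Reader) ([]*T, error) {
	dtos := []*T{}
	if err := json.NewDecoder(reader).Decode(&dtos); err != nil && err != io.EOF {
		return nil, err
	}
	return dtos, nil
}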
+func (c *Client) CreateNodegroups(clusterID string, request *CreateNodegroupsRequest) (*ClusterDTO, error) { + resp, err := c.doPost(c.buildURL("/api/cluster/%s/nodegroups", clusterID), request) + if err != nil { + return nil, err + } + defer closeBody(resp) + + switch resp.StatusCode { + case http.StatusAccepted: + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -303,7 +320,7 @@ func (c *Client) AddClusterAnnotations(clusterID string, annotationsRequest *Add switch resp.StatusCode { case http.StatusOK: - return ClusterDTOFromReader(resp.Body) + return DTOFromReader[ClusterDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -337,7 +354,7 @@ func (c *Client) CreateInstallation(request *CreateInstallationRequest) (*Instal switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -378,7 +395,7 @@ func (c *Client) GetInstallation(installationID string, request *GetInstallation switch resp.StatusCode { case http.StatusOK: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) case http.StatusNotFound: return nil, nil @@ -432,7 +449,7 @@ func (c *Client) GetInstallations(request *GetInstallationsRequest) ([]*Installa switch resp.StatusCode { case http.StatusOK: - return InstallationDTOsFromReader(resp.Body) + return DTOsFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -473,7 +490,7 @@ func (c *Client) UpdateInstallation(installationID string, request *PatchInstall switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -490,7 +507,7 @@ func (c *Client) HibernateInstallation(installationID string) (*InstallationDTO, switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -507,7 +524,7 @@ func (c *Client) WakeupInstallation(installationID string, request *PatchInstall switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -551,10 +568,28 @@ func (c *Client) DeleteInstallation(installationID string) error { } } +// UpdateInstallationDeletion updates the deletion parameters of an installation +// that is still pending deletion. +func (c *Client) UpdateInstallationDeletion(installationID string, request *PatchInstallationDeletionRequest) (*InstallationDTO, error) { + resp, err := c.doPut(c.buildURL("/api/installation/%s/deletion", installationID), request) + if err != nil { + return nil, err + } + defer closeBody(resp) + + switch resp.StatusCode { + case http.StatusOK: + return DTOFromReader[InstallationDTO](resp.Body) + + default: + return nil, errors.Errorf("failed with status code %d", resp.StatusCode) + } +} + // CancelInstallationDeletion cancels the deletion of an installation that is -// still pending deletion +// still pending deletion. 
func (c *Client) CancelInstallationDeletion(installationID string) error { - resp, err := c.doPost(c.buildURL("/api/installation/%s/cancel_deletion", installationID), nil) + resp, err := c.doPost(c.buildURL("/api/installation/%s/deletion/cancel", installationID), nil) if err != nil { return err } @@ -579,7 +614,7 @@ func (c *Client) AddInstallationDNS(installationID string, request *AddDNSRecord switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) } @@ -595,7 +630,7 @@ func (c *Client) SetInstallationDomainPrimary(installationID, installationDNSID switch resp.StatusCode { case http.StatusAccepted: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) } @@ -761,7 +796,7 @@ func (c *Client) AddInstallationAnnotations(installationID string, annotationsRe switch resp.StatusCode { case http.StatusOK: - return InstallationDTOFromReader(resp.Body) + return DTOFromReader[InstallationDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -956,7 +991,7 @@ func (c *Client) ExecClusterInstallationCLI(clusterInstallationID, command strin } defer closeBody(resp) - bytes, _ := ioutil.ReadAll(resp.Body) + bytes, _ := io.ReadAll(resp.Body) switch resp.StatusCode { case http.StatusOK: @@ -977,7 +1012,7 @@ func (c *Client) CreateGroup(request *CreateGroupRequest) (*GroupDTO, error) { switch resp.StatusCode { case http.StatusOK: - return GroupDTOFromReader(resp.Body) + return DTOFromReader[GroupDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -994,7 +1029,7 @@ func (c *Client) UpdateGroup(request *PatchGroupRequest) (*GroupDTO, error) { switch resp.StatusCode { case http.StatusOK: - return GroupDTOFromReader(resp.Body) + return DTOFromReader[GroupDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -1028,7 +1063,7 @@ func (c *Client) GetGroup(groupID string) (*GroupDTO, error) { switch resp.StatusCode { case http.StatusOK: - return GroupDTOFromReader(resp.Body) + return DTOFromReader[GroupDTO](resp.Body) case http.StatusNotFound: return nil, nil @@ -1055,7 +1090,7 @@ func (c *Client) GetGroups(request *GetGroupsRequest) ([]*GroupDTO, error) { switch resp.StatusCode { case http.StatusOK: - return GroupDTOsFromReader(resp.Body) + return DTOsFromReader[GroupDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -1170,7 +1205,7 @@ func (c *Client) AddGroupAnnotations(groupID string, annotationsRequest *AddAnno switch resp.StatusCode { case http.StatusOK: - return GroupDTOFromReader(resp.Body) + return DTOFromReader[GroupDTO](resp.Body) default: return nil, errors.Errorf("failed with status code %d", resp.StatusCode) @@ -1692,3 +1727,18 @@ func (c *Client) DeleteSubscription(subID string) error { return errors.Errorf("failed with status code %d", resp.StatusCode) } } + +func (c *Client) GetClusterInstallationStatus(clusterInstallationID string) (*ClusterInstallationStatus, error) { + resp, err := c.doGet(c.buildURL("/api/cluster_installation/%s/status", clusterInstallationID)) + if err != nil { + return nil, err + } + defer closeBody(resp) + + switch resp.StatusCode { + case http.StatusOK: + return 
NewClusterInstallationStatusFromReader(resp.Body) + default: + return nil, errors.Errorf("failed with status code %d", resp.StatusCode) + } +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster.go index 1ec51578..a7d747a1 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster.go @@ -12,15 +12,6 @@ import ( //go:generate provisioner-code-gen generate --out-file=cluster_gen.go --boilerplate-file=../hack/boilerplate/boilerplate.generatego.txt --type=github.com/mattermost/mattermost-cloud/model.Cluster --generator=get_id,get_state,is_deleted,as_resources -const ( - // MattermostWebhook is the name of the Environment Variable which - // may contain a Mattermost webhook to send notifications to a Mattermost installation - MattermostWebhook = "mattermost-webhook" - // MattermostChannel is the name of the Environment Variable which - // may contain a Mattermost channel in which notifications are going to be sent - MattermostChannel = "mattermost-channel" -) - // Cluster represents a Kubernetes cluster. type Cluster struct { ID string @@ -94,7 +85,8 @@ type AnnotationsFilter struct { MatchAllIDs []string } -var clusterVersionMatcher = regexp.MustCompile(`^(([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3})|(latest))$`) +// EKS only support x.xx versioning +var clusterVersionMatcher = regexp.MustCompile(`^(([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3})|([0-9]{1,3}.[0-9]{1,3})|(latest))$`) // ValidClusterVersion returns true if the provided version is either "latest" // or a valid k8s version number. diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_dto.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_dto.go index ba0ceb6b..cfa7bccf 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_dto.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_dto.go @@ -4,38 +4,8 @@ package model -import ( - "encoding/json" - "io" -) - // ClusterDTO represents cluster entity with connected data. DTO stands for Data Transfer Object. type ClusterDTO struct { *Cluster Annotations []*Annotation `json:"Annotations,omitempty"` } - -// ClusterDTOFromReader decodes a json-encoded cluster DTO from the given io.Reader. -func ClusterDTOFromReader(reader io.Reader) (*ClusterDTO, error) { - clusterDTO := ClusterDTO{} - decoder := json.NewDecoder(reader) - err := decoder.Decode(&clusterDTO) - if err != nil && err != io.EOF { - return nil, err - } - - return &clusterDTO, nil -} - -// ClusterDTOsFromReader decodes a json-encoded list of cluster DTOs from the given io.Reader. -func ClusterDTOsFromReader(reader io.Reader) ([]*ClusterDTO, error) { - clusterDTOs := []*ClusterDTO{} - decoder := json.NewDecoder(reader) - - err := decoder.Decode(&clusterDTOs) - if err != nil && err != io.EOF { - return nil, err - } - - return clusterDTOs, nil -} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_eks.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_eks.go deleted file mode 100644 index 7f5c793d..00000000 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_eks.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. -// - -package model - -import "encoding/json" - -// EKSMetadata is metadata for EKS cluster and node groups. 
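The widened matcher now accepts EKS-style `major.minor` versions alongside full kops-style versions (note the dots in the pattern are unescaped, so the match is slightly looser than it reads). Illustrative results:

import "github.com/mattermost/mattermost-cloud/model"

func versionExamples() {
	_ = model.ValidClusterVersion("1.23.4") // true: full version
	_ = model.ValidClusterVersion("1.23")   // true: EKS-style major.minor, newly accepted
	_ = model.ValidClusterVersion("latest") // true
}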
-type EKSMetadata struct { - KubernetesVersion *string - VPC string - Networking string - ClusterRoleARN *string - - EKSNodeGroups EKSNodeGroups -} - -// EKSNodeGroups maps node group name to configuration. -type EKSNodeGroups map[string]EKSNodeGroup - -// EKSNodeGroup is node group configuration. -type EKSNodeGroup struct { - RoleARN *string - InstanceTypes []string - AMIVersion *string - DesiredSize *int32 - MinSize *int32 - MaxSize *int32 -} - -// NewEKSMetadata creates an instance of EKSMetadata given the raw provisioner metadata. -func NewEKSMetadata(metadataBytes []byte) (*EKSMetadata, error) { - // Check if length of metadata is 0 as opposed to if the value is nil. This - // is done to avoid an issue encountered where the metadata value provided - // had a length of 0, but had non-zero capacity. - if len(metadataBytes) == 0 || string(metadataBytes) == "null" { - // TODO: remove "null" check after sqlite is gone. - return nil, nil - } - - eksMetadata := EKSMetadata{} - err := json.Unmarshal(metadataBytes, &eksMetadata) - if err != nil { - return nil, err - } - - return &eksMetadata, nil -} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_installation.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_installation.go index 50dc17cb..5169e746 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_installation.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_installation.go @@ -7,6 +7,8 @@ package model import ( "encoding/json" "io" + + "github.com/pkg/errors" ) // ClusterInstallation is a single namespace within a cluster composing a potentially larger installation. @@ -33,6 +35,16 @@ type ClusterInstallationFilter struct { IsActive *bool } +type ClusterInstallationStatus struct { + InstallationFound bool `json:"InstallationFound,omitempty"` + Replicas *int32 `json:"Replicas,omitempty"` + TotalPod *int32 `json:"TotalPod,omitempty"` + RunningPod *int32 `json:"RunningPod,omitempty"` + ReadyPod *int32 `json:"ReadyPod,omitempty"` + StartedPod *int32 `json:"StartedPod,omitempty"` + ReadyLocalServer *int32 `json:"ReadyLocalServer,omitempty"` +} + // MigrateClusterInstallationRequest describes the parameters used to compose migration request between two clusters. type MigrateClusterInstallationRequest struct { InstallationID string @@ -124,3 +136,13 @@ func MigrateClusterInstallationResponseFromReader(reader io.Reader) (*MigrateClu return &migrateClusterInstallationResponse, nil } + +func NewClusterInstallationStatusFromReader(reader io.Reader) (*ClusterInstallationStatus, error) { + var status ClusterInstallationStatus + err := json.NewDecoder(reader).Decode(&status) + if err != nil && err != io.EOF { + return nil, errors.Wrap(err, "failed to decode ClusterInstallationStatus") + } + + return &status, nil +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_request.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_request.go index 7d535a19..f64dbd39 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_request.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_request.go @@ -17,38 +17,34 @@ const ( NetworkingCalico = "calico" // NetworkingAmazon is Amazon networking plugin. NetworkingAmazon = "amazon-vpc-routed-eni" -) - -var ( - defaultEKSRoleARN string - defaultNodeGroupRoleARN string + // NetworkingVpcCni is Amazon VPC CNI networking plugin. 
+ NetworkingVpcCni = "amazon-vpc-cni" ) // CreateClusterRequest specifies the parameters for a new cluster. type CreateClusterRequest struct { - Provider string `json:"provider,omitempty"` - Zones []string `json:"zones,omitempty"` - Version string `json:"version,omitempty"` - KopsAMI string `json:"kops-ami,omitempty"` - MasterInstanceType string `json:"master-instance-type,omitempty"` - MasterCount int64 `json:"master-count,omitempty"` - NodeInstanceType string `json:"node-instance-type,omitempty"` - NodeMinCount int64 `json:"node-min-count,omitempty"` - NodeMaxCount int64 `json:"node-max-count,omitempty"` - AllowInstallations bool `json:"allow-installations,omitempty"` - APISecurityLock bool `json:"api-security-lock,omitempty"` - DesiredUtilityVersions map[string]*HelmUtilityVersion `json:"utility-versions,omitempty"` - Annotations []string `json:"annotations,omitempty"` - Networking string `json:"networking,omitempty"` - VPC string `json:"vpc,omitempty"` - MaxPodsPerNode int64 - EKSConfig *EKSConfig `json:"EKSConfig,omitempty"` -} - -// EKSConfig is EKS cluster configuration. -type EKSConfig struct { - ClusterRoleARN *string `json:"clusterRoleARN,omitempty"` - NodeGroups map[string]EKSNodeGroup `json:"nodeGroups,omitempty"` + Provider string `json:"provider,omitempty"` + Zones []string `json:"zones,omitempty"` + Version string `json:"version,omitempty"` + AMI string `json:"ami,omitempty"` + MasterInstanceType string `json:"master-instance-type,omitempty"` + MasterCount int64 `json:"master-count,omitempty"` + NodeInstanceType string `json:"node-instance-type,omitempty"` + NodeMinCount int64 `json:"node-min-count,omitempty"` + NodeMaxCount int64 `json:"node-max-count,omitempty"` + AllowInstallations bool `json:"allow-installations,omitempty"` + APISecurityLock bool `json:"api-security-lock,omitempty"` + DesiredUtilityVersions map[string]*HelmUtilityVersion `json:"utility-versions,omitempty"` + Annotations []string `json:"annotations,omitempty"` + Networking string `json:"networking,omitempty"` + VPC string `json:"vpc,omitempty"` + MaxPodsPerNode int64 `json:"max-pods-per-node,omitempty"` + ClusterRoleARN string `json:"cluster-role-arn,omitempty"` + NodeRoleARN string `json:"node-role-arn,omitempty"` + Provisioner string `json:"provisioner,omitempty"` + AdditionalNodeGroups map[string]NodeGroupMetadata `json:"additional-node-groups,omitempty"` + NodeGroupWithPublicSubnet []string `json:"nodegroup-with-public-subnet,omitempty"` + NodeGroupWithSecurityGroup []string `json:"nodegroup-with-sg,omitempty"` } func (request *CreateClusterRequest) setUtilityDefaults(utilityName string) { @@ -76,11 +72,23 @@ func (request *CreateClusterRequest) SetDefaults() { if len(request.Provider) == 0 { request.Provider = ProviderAWS } + if len(request.Provisioner) == 0 { + request.Provisioner = ProvisionerKops + } if len(request.Version) == 0 { - request.Version = "latest" + if request.Provisioner == ProvisionerEKS { + request.Version = "1.23" + } else { + request.Version = "latest" + } } + if len(request.Zones) == 0 { - request.Zones = []string{"us-east-1a"} + if request.Provisioner == ProvisionerEKS { + request.Zones = []string{"us-east-1a", "us-east-1b"} + } else { + request.Zones = []string{"us-east-1a"} + } } if len(request.MasterInstanceType) == 0 { request.MasterInstanceType = "t3.medium" @@ -103,15 +111,20 @@ func (request *CreateClusterRequest) SetDefaults() { if len(request.Networking) == 0 { request.Networking = NetworkingCalico } - if request.EKSConfig != nil { - if request.EKSConfig.ClusterRoleARN 
== nil { - request.EKSConfig.ClusterRoleARN = &defaultEKSRoleARN - } - for _, ng := range request.EKSConfig.NodeGroups { - if ng.RoleARN == nil { - ng.RoleARN = &defaultNodeGroupRoleARN + if request.Provisioner == ProvisionerEKS { + for ng, meta := range request.AdditionalNodeGroups { + if len(meta.InstanceType) == 0 { + meta.InstanceType = "m5.large" + } + if meta.MinCount == 0 { + meta.MinCount = 2 } + if meta.MaxCount == 0 { + meta.MaxCount = meta.MinCount + } + + request.AdditionalNodeGroups[ng] = meta } } @@ -126,6 +139,9 @@ func (request *CreateClusterRequest) Validate() error { if request.Provider != ProviderAWS { return errors.Errorf("unsupported provider %s", request.Provider) } + if request.Provisioner != ProvisionerKops && request.Provisioner != ProvisionerEKS { + return errors.Errorf("unsupported provisioner %s", request.Provisioner) + } if !ValidClusterVersion(request.Version) { return errors.Errorf("unsupported cluster version %s", request.Version) } @@ -143,13 +159,54 @@ func (request *CreateClusterRequest) Validate() error { } // TODO: check zones and instance types? - if request.EKSConfig != nil { - if request.EKSConfig.ClusterRoleARN == nil || *request.EKSConfig.ClusterRoleARN == "" { + if request.Provisioner == ProvisionerEKS { + if request.ClusterRoleARN == "" { return errors.New("cluster role ARN for EKS cluster cannot be empty") } - if len(request.EKSConfig.NodeGroups) == 0 { - return errors.New("at least 1 node group is required when using EKS") + if request.NodeRoleARN == "" { + return errors.New("node role ARN for EKS cluster cannot be empty") + } + if request.AMI == "" { + return errors.New("AMI for EKS cluster cannot be empty") + } + + if len(request.Zones) < 2 { + return errors.New("EKS cluster needs at least two zones") + } + + if request.AdditionalNodeGroups != nil { + if _, f := request.AdditionalNodeGroups[NodeGroupWorker]; f { + return errors.New("additional node group name cannot be named worker") + } + + for name, ng := range request.AdditionalNodeGroups { + if ng.MinCount < 1 { + return errors.Errorf("node min count (%d) must be 1 or greater for node group %s", ng.MinCount, name) + } + if ng.MaxCount != ng.MinCount { + return errors.Errorf("node min (%d) and max (%d) counts must match for node group %s", ng.MinCount, ng.MaxCount, name) + } + } } + + for _, ng := range request.NodeGroupWithPublicSubnet { + if ng == NodeGroupWorker { + continue + } + if _, f := request.AdditionalNodeGroups[ng]; !f { + return errors.Errorf("invalid nodegroup %s to use public subnets", ng) + } + } + + for _, ng := range request.NodeGroupWithSecurityGroup { + if ng == NodeGroupWorker { + continue + } + if _, f := request.AdditionalNodeGroups[ng]; !f { + return errors.Errorf("invalid nodegroup %s to use security group", ng) + } + } + } if !contains(GetSupportedCniList(), request.Networking) { @@ -222,7 +279,7 @@ func NewUpdateClusterRequestFromReader(reader io.Reader) (*UpdateClusterRequest, // PatchUpgradeClusterRequest specifies the parameters for upgrading a cluster. type PatchUpgradeClusterRequest struct { Version *string `json:"version,omitempty"` - KopsAMI *string `json:"kops-ami,omitempty"` + AMI *string `json:"ami,omitempty"` RotatorConfig *RotatorConfig `json:"rotatorConfig,omitempty"` MaxPodsPerNode *int64 } @@ -245,36 +302,6 @@ func (p *PatchUpgradeClusterRequest) Validate() error { return nil } -// Apply applies the patch to the given cluster's metadata. 
-func (p *PatchUpgradeClusterRequest) Apply(metadata *KopsMetadata) bool { - changes := &KopsMetadataRequestedState{} - - var applied bool - if p.Version != nil && *p.Version != metadata.Version { - applied = true - changes.Version = *p.Version - } - if p.KopsAMI != nil && *p.KopsAMI != metadata.AMI { - applied = true - changes.AMI = *p.KopsAMI - } - if p.MaxPodsPerNode != nil && *p.MaxPodsPerNode != metadata.MaxPodsPerNode { - applied = true - changes.MaxPodsPerNode = *p.MaxPodsPerNode - } - - if metadata.RotatorRequest == nil { - metadata.RotatorRequest = &RotatorMetadata{} - } - - if applied { - metadata.ChangeRequest = changes - metadata.RotatorRequest.Config = p.RotatorConfig - } - - return applied -} - // NewUpgradeClusterRequestFromReader will create an UpgradeClusterRequest from an io.Reader with JSON data. func NewUpgradeClusterRequestFromReader(reader io.Reader) (*PatchUpgradeClusterRequest, error) { var upgradeClusterRequest PatchUpgradeClusterRequest @@ -297,6 +324,7 @@ type PatchClusterSizeRequest struct { NodeMinCount *int64 `json:"node-min-count,omitempty"` NodeMaxCount *int64 `json:"node-max-count,omitempty"` RotatorConfig *RotatorConfig `json:"rotatorConfig,omitempty"` + NodeGroups []string `json:"nodeGroups,omitempty"` } // Validate validates the values of a PatchClusterSizeRequest. @@ -321,36 +349,6 @@ func (p *PatchClusterSizeRequest) Validate() error { return nil } -// Apply applies the patch to the given cluster's kops metadata. -func (p *PatchClusterSizeRequest) Apply(metadata *KopsMetadata) bool { - changes := &KopsMetadataRequestedState{} - - var applied bool - if p.NodeInstanceType != nil && *p.NodeInstanceType != metadata.NodeInstanceType { - applied = true - changes.NodeInstanceType = *p.NodeInstanceType - } - if p.NodeMinCount != nil && *p.NodeMinCount != metadata.NodeMinCount { - applied = true - changes.NodeMinCount = *p.NodeMinCount - } - if p.NodeMaxCount != nil && *p.NodeMaxCount != metadata.NodeMaxCount { - applied = true - changes.NodeMaxCount = *p.NodeMaxCount - } - - if metadata.RotatorRequest == nil { - metadata.RotatorRequest = &RotatorMetadata{} - } - - if applied { - metadata.ChangeRequest = changes - metadata.RotatorRequest.Config = p.RotatorConfig - } - - return applied -} - // NewResizeClusterRequestFromReader will create an PatchClusterSizeRequest from an io.Reader with JSON data. func NewResizeClusterRequestFromReader(reader io.Reader) (*PatchClusterSizeRequest, error) { var patchClusterSizeRequest PatchClusterSizeRequest @@ -383,3 +381,75 @@ func NewProvisionClusterRequestFromReader(reader io.Reader) (*ProvisionClusterRe return &provisionClusterRequest, nil } + +type CreateNodegroupsRequest struct { + Nodegroups map[string]NodeGroupMetadata `json:"nodegroups"` + NodeGroupWithPublicSubnet []string `json:"nodegroup-with-public-subnet,omitempty"` + NodeGroupWithSecurityGroup []string `json:"nodegroup-with-sg,omitempty"` +} + +// SetDefaults sets default values for nodegroups. +func (request *CreateNodegroupsRequest) SetDefaults() { + for ng, meta := range request.Nodegroups { + if len(meta.InstanceType) == 0 { + meta.InstanceType = "m5.large" + } + if meta.MinCount == 0 { + meta.MinCount = 2 + } + if meta.MaxCount == 0 { + meta.MaxCount = meta.MinCount + } + + request.Nodegroups[ng] = meta + } +} + +// Validate validates the values of a nodegroup creation request. 
+func (request *CreateNodegroupsRequest) Validate() error { + for ng, meta := range request.Nodegroups { + if meta.MinCount < 1 { + return errors.Errorf("nodegroup %s min count has to be 1 or greater", ng) + } + if meta.MaxCount < meta.MinCount { + return errors.Errorf("nodegroup %s max count (%d) can't be less than min count (%d)", ng, meta.MaxCount, meta.MinCount) + } + } + + for _, ng := range request.NodeGroupWithPublicSubnet { + if ng == NodeGroupWorker { + continue + } + if _, f := request.Nodegroups[ng]; !f { + return errors.Errorf("invalid nodegroup %s to use public subnets", ng) + } + } + + for _, ng := range request.NodeGroupWithSecurityGroup { + if ng == NodeGroupWorker { + continue + } + if _, f := request.Nodegroups[ng]; !f { + return errors.Errorf("invalid nodegroup %s to use security group", ng) + } + } + + return nil +} + +// NewCreateNodegroupsRequestFromReader will create an CreateNodegroupsRequest from an io.Reader with JSON data. +func NewCreateNodegroupsRequestFromReader(reader io.Reader) (*CreateNodegroupsRequest, error) { + var createNodegroupsRequest CreateNodegroupsRequest + err := json.NewDecoder(reader).Decode(&createNodegroupsRequest) + if err != nil && err != io.EOF { + return nil, errors.Wrap(err, "failed to decode create nodegroups request") + } + + createNodegroupsRequest.SetDefaults() + err = createNodegroupsRequest.Validate() + if err != nil { + return nil, errors.Wrap(err, "create nodegroups request failed validation") + } + + return &createNodegroupsRequest, nil +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_states.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_states.go index 2837ab35..108353db 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_states.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_states.go @@ -11,6 +11,10 @@ const ( ClusterStateCreationRequested = "creation-requested" // ClusterStateCreationInProgress is a cluster that is being actively created. ClusterStateCreationInProgress = "creation-in-progress" + // ClusterStateWaitingForNodes is a cluster that is waiting for nodes to be ready + ClusterStateWaitingForNodes = "waiting-for-nodes" + // ClusterStateProvisionInProgress is a cluster in the process of being provisioned. + ClusterStateProvisionInProgress = "provision-in-progress" // ClusterStateCreationFailed is a cluster that failed creation. ClusterStateCreationFailed = "creation-failed" // ClusterStateProvisioningRequested is a cluster in the process of being @@ -28,6 +32,10 @@ const ( ClusterStateResizeRequested = "resize-requested" // ClusterStateResizeFailed is a cluster that failed to resize. ClusterStateResizeFailed = "resize-failed" + // ClusterStateNodegroupsCreationRequested is a cluster in the process of creating nodegroups. + ClusterStateNodegroupsCreationRequested = "nodegroups-creation-requested" + // ClusterStateNodegroupsCreationFailed is a cluster that failed to create nodegroups. + ClusterStateNodegroupsCreationFailed = "nodegroups-creation-failed" // ClusterStateDeletionRequested is a cluster in the process of being deleted. ClusterStateDeletionRequested = "deletion-requested" // ClusterStateDeletionFailed is a cluster that failed deletion. 
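Note on the cluster_states.go hunks above and below: four states are added for the EKS path. Creation now flows through waiting-for-nodes and provision-in-progress before reaching stable, and nodegroup creation gets its own requested/failed pair. A minimal sketch of how a poller might interpret the new states, assuming the vendored model import path; the helper itself is illustrative and not part of this change:

package statewatch

import "github.com/mattermost/mattermost-cloud/model"

// creationProgress classifies a cluster state during creation: done reports
// whether a terminal state was reached, failed whether that state is an error.
func creationProgress(state string) (done bool, failed bool) {
	switch state {
	case model.ClusterStateStable:
		return true, false // creation and provisioning finished
	case model.ClusterStateCreationFailed:
		return true, true // terminal failure
	case model.ClusterStateCreationRequested,
		model.ClusterStateCreationInProgress,
		model.ClusterStateWaitingForNodes,     // EKS: waiting on node readiness
		model.ClusterStateProvisionInProgress: // EKS: utilities being installed
		return false, false
	}
	return false, false // unknown states treated as still in progress
}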
@@ -44,9 +52,13 @@ var AllClusterStates = []string{ ClusterStateRefreshMetadata, ClusterStateCreationRequested, ClusterStateCreationInProgress, + ClusterStateWaitingForNodes, + ClusterStateProvisionInProgress, ClusterStateCreationFailed, ClusterStateProvisioningRequested, ClusterStateProvisioningFailed, + ClusterStateNodegroupsCreationRequested, + ClusterStateNodegroupsCreationFailed, ClusterStateUpgradeRequested, ClusterStateUpgradeFailed, ClusterStateResizeRequested, @@ -64,18 +76,23 @@ var AllClusterStates = []string{ var AllClusterStatesPendingWork = []string{ ClusterStateCreationRequested, ClusterStateCreationInProgress, + ClusterStateWaitingForNodes, + ClusterStateProvisionInProgress, ClusterStateProvisioningRequested, ClusterStateRefreshMetadata, ClusterStateUpgradeRequested, ClusterStateResizeRequested, + ClusterStateNodegroupsCreationRequested, ClusterStateDeletionRequested, } // ClusterStateWorkPriority is a map of states to their priority. Default priority is 0. // States with higher priority will be processed first. var ClusterStateWorkPriority = map[string]int{ - ClusterStateCreationRequested: 2, - ClusterStateCreationInProgress: 1, + ClusterStateCreationRequested: 4, + ClusterStateCreationInProgress: 3, + ClusterStateWaitingForNodes: 2, + ClusterStateProvisionInProgress: 1, } // AllClusterRequestStates is a list of all states that a cluster can be put in @@ -88,6 +105,7 @@ var AllClusterRequestStates = []string{ ClusterStateProvisioningRequested, ClusterStateUpgradeRequested, ClusterStateResizeRequested, + ClusterStateNodegroupsCreationRequested, ClusterStateDeletionRequested, } @@ -123,6 +141,11 @@ var ( ClusterStateResizeRequested, ClusterStateResizeFailed, }, + ClusterStateNodegroupsCreationRequested: { + ClusterStateStable, + ClusterStateNodegroupsCreationRequested, + ClusterStateNodegroupsCreationFailed, + }, ClusterStateDeletionRequested: { ClusterStateStable, ClusterStateCreationRequested, diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_utility.go b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_utility.go index 12df425b..135b411a 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/cluster_utility.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/cluster_utility.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io" + "strings" ) const ( @@ -62,15 +63,15 @@ var DefaultUtilityVersions map[string]*HelmUtilityVersion = map[string]*HelmUtil // PrometheusOperatorCanonicalName defines the default version and values path for the Helm chart PrometheusOperatorCanonicalName: {Chart: "40.5.0", ValuesPath: ""}, // ThanosCanonicalName defines the default version and values path for the Helm chart - ThanosCanonicalName: {Chart: "10.5.4", ValuesPath: ""}, + ThanosCanonicalName: {Chart: "11.5.4", ValuesPath: ""}, // NginxCanonicalName defines the default version and values path for the Helm chart - NginxCanonicalName: {Chart: "4.2.0", ValuesPath: ""}, + NginxCanonicalName: {Chart: "4.5.2", ValuesPath: ""}, // NginxInternalCanonicalName defines the default version and values path for the Helm chart - NginxInternalCanonicalName: {Chart: "4.2.0", ValuesPath: ""}, + NginxInternalCanonicalName: {Chart: "4.5.2", ValuesPath: ""}, // FluentbitCanonicalName defines the default version and values path for the Helm chart FluentbitCanonicalName: {Chart: "0.20.1", ValuesPath: ""}, // TeleportCanonicalName defines the default version and values path for the Helm chart - TeleportCanonicalName: {Chart: "6.2.8", ValuesPath: ""}, + 
TeleportCanonicalName: {Chart: "7.3.26", ValuesPath: ""}, // PgbouncerCanonicalName defines the default version and values path for the Helm chart PgbouncerCanonicalName: {Chart: "1.2.0", ValuesPath: ""}, // PromtailCanonicalName defines the default version and values path for the Helm chart @@ -78,11 +79,11 @@ var DefaultUtilityVersions map[string]*HelmUtilityVersion = map[string]*HelmUtil // RtcdCanonicalName defines the default version and values path for the Helm chart RtcdCanonicalName: {Chart: "1.1.0", ValuesPath: ""}, // NodeProblemDetectorCanonicalName defines the default version and values path for the Helm chart - NodeProblemDetectorCanonicalName: {Chart: "2.0.5", ValuesPath: ""}, + NodeProblemDetectorCanonicalName: {Chart: "2.3.2", ValuesPath: ""}, // MetricsServerCanonicalName defines the default version and values path for the Helm chart - MetricsServerCanonicalName: {Chart: "3.8.2", ValuesPath: ""}, + MetricsServerCanonicalName: {Chart: "3.8.3", ValuesPath: ""}, // VeleroCanonicalName defines the default version for the Helm chart - VeleroCanonicalName: {Chart: "2.31.3", ValuesPath: ""}, + VeleroCanonicalName: {Chart: "3.1.2", ValuesPath: ""}, // CloudproberCanonicalName defines the default version for the Helm chart CloudproberCanonicalName: {Chart: "0.1.1", ValuesPath: ""}, } @@ -312,6 +313,17 @@ type HelmUtilityVersion struct { ValuesPath string } +// UnmarshalJSON tries to unmarshal the HelmUtilityVersion from JSON +// If it fails, it assumes that bytes is just the chart version in string +func (u *HelmUtilityVersion) UnmarshalJSON(bytes []byte) error { + type newHelmUtilityVersion HelmUtilityVersion + err := json.Unmarshal(bytes, (*newHelmUtilityVersion)(u)) + if err != nil { + u.Chart = strings.Trim(string(bytes), `"`) + } + return nil +} + // Version returns the Helm chart version func (u *HelmUtilityVersion) Version() string { return u.Chart diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/dtos.go b/vendor/github.com/mattermost/mattermost-cloud/model/dtos.go new file mode 100644 index 00000000..e15c2200 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-cloud/model/dtos.go @@ -0,0 +1,33 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. +// + +package model + +import ( + "encoding/json" + "io" +) + +func DTOFromReader[T any](reader io.Reader) (*T, error) { + var dto T + decoder := json.NewDecoder(reader) + err := decoder.Decode(&dto) + if err != nil && err != io.EOF { + return nil, err + } + + return &dto, nil +} + +func DTOsFromReader[T any](reader io.Reader) ([]*T, error) { + dtos := []*T{} + decoder := json.NewDecoder(reader) + + err := decoder.Decode(&dtos) + if err != nil && err != io.EOF { + return nil, err + } + + return dtos, nil +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/eks_metadata.go b/vendor/github.com/mattermost/mattermost-cloud/model/eks_metadata.go new file mode 100644 index 00000000..734f956c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-cloud/model/eks_metadata.go @@ -0,0 +1,384 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. +// + +package model + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" +) + +const ( + ProvisionerEKS = "eks" + NodeGroupWorker = "worker" +) + +// EKSMetadata is metadata for EKS cluster and node groups. 
+type EKSMetadata struct { + Name string + Version string + AMI string + VPC string + Networking string + ClusterRoleARN string + NodeRoleARN string + MaxPodsPerNode int64 + NodeGroups map[string]NodeGroupMetadata + ChangeRequest *EKSMetadataRequestedState `json:"ChangeRequest,omitempty"` + Warnings []string `json:"Warnings,omitempty"` +} + +// NodeGroupMetadata is the metadata of an instance group. +type NodeGroupMetadata struct { + Name string + Type string `json:"Type,omitempty"` + InstanceType string `json:"InstanceType,omitempty"` + MinCount int64 `json:"MinCount,omitempty"` + MaxCount int64 `json:"MaxCount,omitempty"` + WithPublicSubnet bool `json:"WithPublicSubnet,omitempty"` + WithSecurityGroup bool `json:"WithSecurityGroup,omitempty"` +} + +// EKSMetadataRequestedState is the requested state for eks metadata. +type EKSMetadataRequestedState struct { + Version string `json:"Version,omitempty"` + AMI string `json:"AMI,omitempty"` + MaxPodsPerNode int64 `json:"MaxPodsPerNode,omitempty"` + Networking string `json:"Networking,omitempty"` + VPC string `json:"VPC,omitempty"` + ClusterRoleARN string `json:"ClusterRoleARN,omitempty"` + NodeRoleARN string `json:"NodeRoleARN,omitempty"` + NodeGroups map[string]NodeGroupMetadata `json:"NodeGroups,omitempty"` +} + +// CopyMissingFieldsFrom copy empty fields from the given NodeGroupMetadata to the current metadata. +func (ng *NodeGroupMetadata) CopyMissingFieldsFrom(other NodeGroupMetadata) { + if len(ng.Type) == 0 { + ng.Type = other.Type + } + if ng.InstanceType == "" { + ng.InstanceType = other.InstanceType + } + if ng.MinCount == 0 { + ng.MinCount = other.MinCount + } + if ng.MaxCount == 0 { + ng.MaxCount = other.MaxCount + } + if !ng.WithPublicSubnet { + ng.WithPublicSubnet = other.WithPublicSubnet + } + if !ng.WithSecurityGroup { + ng.WithSecurityGroup = other.WithSecurityGroup + } +} + +func (em *EKSMetadata) ApplyClusterCreateRequest(createRequest *CreateClusterRequest) bool { + + em.ChangeRequest = &EKSMetadataRequestedState{ + Version: createRequest.Version, + AMI: createRequest.AMI, + MaxPodsPerNode: createRequest.MaxPodsPerNode, + VPC: createRequest.VPC, + ClusterRoleARN: createRequest.ClusterRoleARN, + NodeRoleARN: createRequest.NodeRoleARN, + NodeGroups: map[string]NodeGroupMetadata{}, + } + + nodeGroups := createRequest.AdditionalNodeGroups + if nodeGroups == nil { + nodeGroups = map[string]NodeGroupMetadata{} + } + + nodeGroups[NodeGroupWorker] = NodeGroupMetadata{ + InstanceType: createRequest.NodeInstanceType, + MinCount: createRequest.NodeMinCount, + MaxCount: createRequest.NodeMaxCount, + } + + for _, ng := range createRequest.NodeGroupWithPublicSubnet { + nodeGroup := nodeGroups[ng] + nodeGroup.WithPublicSubnet = true + nodeGroups[ng] = nodeGroup + } + + for _, ng := range createRequest.NodeGroupWithSecurityGroup { + nodeGroup := nodeGroups[ng] + nodeGroup.WithSecurityGroup = true + nodeGroups[ng] = nodeGroup + } + + for name, ng := range nodeGroups { + em.ChangeRequest.NodeGroups[name] = NodeGroupMetadata{ + Name: fmt.Sprintf("%s-%s", name, NewNodeGroupSuffix()), + Type: name, + InstanceType: ng.InstanceType, + MinCount: ng.MinCount, + MaxCount: ng.MaxCount, + WithPublicSubnet: ng.WithPublicSubnet, + WithSecurityGroup: ng.WithSecurityGroup, + } + } + + em.NodeGroups = map[string]NodeGroupMetadata{} + + return true +} + +// ApplyUpgradePatch applies the patch to the given cluster's metadata. 
+func (em *EKSMetadata) ApplyUpgradePatch(patchRequest *PatchUpgradeClusterRequest) bool { + changes := &EKSMetadataRequestedState{} + + var applied bool + if patchRequest.Version != nil && *patchRequest.Version != em.Version { + applied = true + changes.Version = *patchRequest.Version + } + if patchRequest.AMI != nil && *patchRequest.AMI != em.AMI { + applied = true + changes.AMI = *patchRequest.AMI + } + if patchRequest.MaxPodsPerNode != nil && *patchRequest.MaxPodsPerNode != em.MaxPodsPerNode { + applied = true + changes.MaxPodsPerNode = *patchRequest.MaxPodsPerNode + } + + if applied { + changes.NodeGroups = map[string]NodeGroupMetadata{} + for ng := range em.NodeGroups { + changes.NodeGroups[ng] = NodeGroupMetadata{ + Name: fmt.Sprintf("%s-%s", ng, NewNodeGroupSuffix()), + } + } + em.ChangeRequest = changes + } + + return applied +} + +func (em *EKSMetadata) ValidateClusterSizePatch(patchRequest *PatchClusterSizeRequest) error { + nodeGroups := patchRequest.NodeGroups + + if len(em.NodeGroups) == 0 { + return errors.New("no nodegroups available to resize") + } + + if len(nodeGroups) == 0 { + if len(em.NodeGroups) > 1 { + return errors.New("must specify nodegroups to resize") + } + for ng := range em.NodeGroups { + nodeGroups = append(nodeGroups, ng) + } + } + + for _, ngToResize := range nodeGroups { + if _, f := em.NodeGroups[ngToResize]; !f { + return errors.Errorf("nodegroup %s not found to resize", ngToResize) + } + } + + if patchRequest.NodeMinCount != nil && patchRequest.NodeMaxCount != nil { + if *patchRequest.NodeMinCount > *patchRequest.NodeMaxCount { + return errors.New("min node count cannot be greater than max node count") + } + return nil + } + + if patchRequest.NodeMinCount != nil { + for _, ngToResize := range nodeGroups { + ng := em.NodeGroups[ngToResize] + nodeMaxCount := ng.MaxCount + if *patchRequest.NodeMinCount > nodeMaxCount { + return errors.New("resize patch would set min node count higher than max node count") + } + } + } + + if patchRequest.NodeMaxCount != nil { + for _, ngToResize := range nodeGroups { + ng := em.NodeGroups[ngToResize] + nodeMinCount := ng.MinCount + if *patchRequest.NodeMaxCount < nodeMinCount { + return errors.New("resize patch would set max node count lower than min node count") + } + } + } + + return nil +} + +func (em *EKSMetadata) ApplyClusterSizePatch(patchRequest *PatchClusterSizeRequest) bool { + changes := &EKSMetadataRequestedState{ + NodeGroups: map[string]NodeGroupMetadata{}, + } + + var applied bool + + nodeGroupsMeta := patchRequest.NodeGroups + if len(nodeGroupsMeta) == 0 { + for ngPrefix := range em.NodeGroups { + nodeGroupsMeta = append(nodeGroupsMeta, ngPrefix) + } + } + + for _, ng := range nodeGroupsMeta { + ngChangeRequest := NodeGroupMetadata{ + Name: fmt.Sprintf("%s-%s", ng, NewNodeGroupSuffix()), + } + if patchRequest.NodeInstanceType != nil { + applied = true + ngChangeRequest.InstanceType = *patchRequest.NodeInstanceType + } + if patchRequest.NodeMinCount != nil { + applied = true + ngChangeRequest.MinCount = *patchRequest.NodeMinCount + } + if patchRequest.NodeMaxCount != nil { + applied = true + ngChangeRequest.MaxCount = *patchRequest.NodeMaxCount + } + + changes.NodeGroups[ng] = ngChangeRequest + } + + if applied { + em.ChangeRequest = changes + } + + return applied +} + +// ValidateChangeRequest ensures that the ChangeRequest has at least one +// actionable value. 
+func (em *EKSMetadata) ValidateChangeRequest() error {
+ changeRequest := em.ChangeRequest
+ if changeRequest == nil {
+ return errors.New("the EKS Metadata ChangeRequest is nil")
+ }
+
+ changeAllowed := false
+ if len(changeRequest.Version) != 0 || len(changeRequest.AMI) != 0 || changeRequest.MaxPodsPerNode != 0 {
+ changeAllowed = true
+ }
+
+ if changeAllowed {
+ return nil
+ }
+
+ for _, ng := range changeRequest.NodeGroups {
+ if len(ng.InstanceType) != 0 || ng.MinCount != 0 || ng.MaxCount != 0 {
+ changeAllowed = true
+ break
+ }
+ }
+
+ if !changeAllowed {
+ return errors.New("the EKS Metadata ChangeRequest has no change values set")
+ }
+
+ return nil
+}
+
+// ApplyChangeRequest applies change request values to the EKSMetadata that are
+// not reflected by a metadata refresh. It is currently a no-op for EKS.
+func (em *EKSMetadata) ApplyChangeRequest() {
+}
+
+// ValidateNodegroupsCreateRequest ensures that the nodegroups to create do not
+// already exist.
+func (em *EKSMetadata) ValidateNodegroupsCreateRequest(nodegroups map[string]NodeGroupMetadata) error {
+ if len(nodegroups) == 0 {
+ return errors.New("must specify at least one nodegroup to create")
+ }
+
+ for ng := range nodegroups {
+ if _, f := em.NodeGroups[ng]; f {
+ return errors.Errorf("nodegroup %s already exists", ng)
+ }
+ }
+
+ return nil
+}
+
+// ApplyNodegroupsCreateRequest applies the nodegroups to create to the
+// EKSMetadata.
+func (em *EKSMetadata) ApplyNodegroupsCreateRequest(request *CreateNodegroupsRequest) {
+ em.ChangeRequest = &EKSMetadataRequestedState{
+ NodeGroups: map[string]NodeGroupMetadata{},
+ }
+
+ nodeGroups := request.Nodegroups
+
+ for _, ng := range request.NodeGroupWithPublicSubnet {
+ nodeGroup := nodeGroups[ng]
+ nodeGroup.WithPublicSubnet = true
+ nodeGroups[ng] = nodeGroup
+ }
+
+ for _, ng := range request.NodeGroupWithSecurityGroup {
+ nodeGroup := nodeGroups[ng]
+ nodeGroup.WithSecurityGroup = true
+ nodeGroups[ng] = nodeGroup
+ }
+
+ for name, ng := range nodeGroups {
+ em.ChangeRequest.NodeGroups[name] = NodeGroupMetadata{
+ Name: fmt.Sprintf("%s-%s", name, NewNodeGroupSuffix()),
+ Type: name,
+ InstanceType: ng.InstanceType,
+ MinCount: ng.MinCount,
+ MaxCount: ng.MaxCount,
+ WithPublicSubnet: ng.WithPublicSubnet,
+ WithSecurityGroup: ng.WithSecurityGroup,
+ }
+ }
+
+}
+
+func (em *EKSMetadata) GetCommonMetadata() ProvisionerMetadata {
+ workerNodeGroup := em.NodeGroups[NodeGroupWorker]
+ return ProvisionerMetadata{
+ Name: em.Name,
+ Version: em.Version,
+ AMI: em.AMI,
+ NodeInstanceType: workerNodeGroup.InstanceType,
+ NodeMinCount: workerNodeGroup.MinCount,
+ NodeMaxCount: workerNodeGroup.MaxCount,
+ MaxPodsPerNode: em.MaxPodsPerNode,
+ VPC: em.VPC,
+ Networking: em.Networking,
+ }
+}
+
+// ClearChangeRequest clears the EKS metadata change request.
+func (em *EKSMetadata) ClearChangeRequest() {
+ em.ChangeRequest = nil
+}
+
+// ClearWarnings clears the EKS metadata warnings.
+func (em *EKSMetadata) ClearWarnings() {
+ em.Warnings = []string{}
+}
+
+// NewEKSMetadata creates an instance of EKSMetadata given the raw provisioner metadata.
+func NewEKSMetadata(metadataBytes []byte) (*EKSMetadata, error) {
+ // Check if length of metadata is 0 as opposed to if the value is nil. This
+ // is done to avoid an issue encountered where the metadata value provided
+ // had a length of 0, but had non-zero capacity.
+ if len(metadataBytes) == 0 || string(metadataBytes) == "null" {
+ // TODO: remove "null" check after sqlite is gone.
+ return nil, nil + } + + eksMetadata := EKSMetadata{} + err := json.Unmarshal(metadataBytes, &eksMetadata) + if err != nil { + return nil, err + } + + return &eksMetadata, nil +} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/env.go b/vendor/github.com/mattermost/mattermost-cloud/model/env.go index 06e46025..f0030574 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/env.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/env.go @@ -51,24 +51,24 @@ func (em *EnvVarMap) Validate() error { // EnvVarMap with the following logic: // - If the new EnvVarMap is empty, clear the existing EnvVarMap completely. // - If the new EnvVarMap is not empty, apply normal patch logic. -func (em *EnvVarMap) ClearOrPatch(new *EnvVarMap) bool { +func (em *EnvVarMap) ClearOrPatch(input *EnvVarMap) bool { if *em == nil { - if len(*new) == 0 { + if len(*input) == 0 { return false } - *em = *new + *em = *input return true } - if len(*new) == 0 { + if len(*input) == 0 { originalEmpty := len(*em) != 0 *em = nil return originalEmpty } - return em.Patch(*new) + return em.Patch(*input) } // Patch takes a new EnvVarMap and patches changes into the existing EnvVarMap @@ -77,13 +77,13 @@ func (em *EnvVarMap) ClearOrPatch(new *EnvVarMap) bool { // - If the new EnvVar is a new key, add the EnvVar. // - If the new EnvVar has no value(is blank), clear the old EnvVar if there // was one. -func (em EnvVarMap) Patch(new EnvVarMap) bool { - if new == nil { +func (em EnvVarMap) Patch(input EnvVarMap) bool { + if input == nil { return false } var wasPatched bool - for newName, newEnv := range new { + for newName, newEnv := range input { if oldEnv, ok := em[newName]; ok { // This EnVar exists already. Delete it or update it if the patch // value is different. diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/events_state_change.go b/vendor/github.com/mattermost/mattermost-cloud/model/events_state_change.go index c293d0fd..c54ff6ad 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/events_state_change.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/events_state_change.go @@ -30,6 +30,7 @@ type StateChangeEventData struct { type StateChangeEventDeliveryData struct { EventDelivery EventDelivery EventData StateChangeEventData + EventHeaders Headers } // StateChangeEventPayload represents payload that is sent to consumers. diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription.go b/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription.go index 94d2f166..a9c35c77 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription.go @@ -39,6 +39,7 @@ type Subscription struct { DeleteAt int64 LockAcquiredBy *string LockAcquiredAt int64 + Headers Headers } // IsDeleted returns true if subscription is deleted. 
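Subscriptions (and, further down, webhooks) now persist a Headers value that is attached to outgoing deliveries; the Headers type itself is introduced in model/types.go later in this diff. A short sketch of building and validating one, with one static and one environment-sourced header; the surrounding program is illustrative only:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-cloud/model"
)

func main() {
	auth := "Bearer abc123"         // stored verbatim with the subscription
	envName := "SUBSCRIPTION_TOKEN" // resolved via os.Getenv at delivery time

	headers := model.Headers{
		{Key: "Authorization", Value: &auth},
		{Key: "X-Delivery-Token", ValueFromEnv: &envName},
	}
	if err := headers.Validate(); err != nil {
		fmt.Println("invalid headers:", err)
		return
	}
	fmt.Println(headers.GetHeaders()) // env-backed value resolved here
}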
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription_request.go b/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription_request.go index ce3486a2..7f56bb19 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription_request.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/events_subscription_request.go @@ -20,6 +20,7 @@ type CreateSubscriptionRequest struct { OwnerID string EventType EventType FailureThreshold time.Duration + Headers Headers } // ToSubscription validates request and converts it to subscription @@ -28,7 +29,6 @@ func (r CreateSubscriptionRequest) ToSubscription() (Subscription, error) { if err != nil { return Subscription{}, errors.Wrap(err, "failed to parse subscription URL") } - if r.EventType == "" { return Subscription{}, errors.New("event type is required when registering subscription") } @@ -47,6 +47,7 @@ func (r CreateSubscriptionRequest) ToSubscription() (Subscription, error) { LastDeliveryStatus: SubscriptionDeliveryNone, LastDeliveryAttemptAt: 0, FailureThreshold: r.FailureThreshold, + Headers: r.Headers, }, nil } diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/group_dto.go b/vendor/github.com/mattermost/mattermost-cloud/model/group_dto.go index a834b738..224d4743 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/group_dto.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/group_dto.go @@ -4,11 +4,6 @@ package model -import ( - "encoding/json" - "io" -) - // GroupDTO is a group with Annotations. type GroupDTO struct { *Group @@ -20,28 +15,3 @@ type GroupDTO struct { func (g GroupDTO) GetInstallationCount() int64 { return *g.InstallationCount } - -// GroupDTOFromReader decodes a json-encoded group DTO from the given io.Reader. -func GroupDTOFromReader(reader io.Reader) (*GroupDTO, error) { - groupDTO := GroupDTO{} - decoder := json.NewDecoder(reader) - err := decoder.Decode(&groupDTO) - if err != nil && err != io.EOF { - return nil, err - } - - return &groupDTO, nil -} - -// GroupDTOsFromReader decodes a json-encoded list of group DTOs from the given io.Reader. -func GroupDTOsFromReader(reader io.Reader) ([]*GroupDTO, error) { - groupDTOs := []*GroupDTO{} - decoder := json.NewDecoder(reader) - - err := decoder.Decode(&groupDTOs) - if err != nil && err != io.EOF { - return nil, err - } - - return groupDTOs, nil -} diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/id.go b/vendor/github.com/mattermost/mattermost-cloud/model/id.go index 167a1b7b..d36bed30 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/id.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/id.go @@ -39,3 +39,8 @@ func ClusterNewID() string { } return strID } + +// NewNodeGroupSuffix is a globally unique identifier for nodegroup ID which start with a letter. 
It is a [a-z0-9] string 6 characters long.
+func NewNodeGroupSuffix() string {
+ return NewID()[:6]
+}
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/installation.go b/vendor/github.com/mattermost/mattermost-cloud/model/installation.go
index 568aa480..c7ef8430 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/installation.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/installation.go
@@ -46,6 +46,7 @@ type Installation struct {
 CRVersion string
 CreateAt int64
 DeleteAt int64
+ DeletionPendingExpiry int64 `json:"DeletionPendingExpiry,omitempty"`
 APISecurityLock bool
 LockAcquiredBy *string
 LockAcquiredAt int64
@@ -104,7 +105,7 @@ func (i *Installation) ToDTO(annotations []*Annotation, dnsRecords []*Installati
 // CreationDateString returns a standardized date string for an installation's
 // creation.
 func (i *Installation) CreationDateString() string {
- return GetDateString(i.CreateAt)
+ return DateStringFromMillis(i.CreateAt)
 }
 // DeletionDateString returns a standardized date string for an installation's
@@ -114,7 +115,17 @@ func (i *Installation) DeletionDateString() string {
 return "n/a"
 }
- return TimeFromMillis(i.DeleteAt).Format("Jan 2 2006")
+ return DateStringFromMillis(i.DeleteAt)
+}
+
+// DeletionPendingExpiryCompleteTimeString returns a standardized time string for
+// an installation's deletion or 'n/a' if not pending deletion.
+func (i *Installation) DeletionPendingExpiryCompleteTimeString() string {
+ if i.DeletionPendingExpiry == 0 {
+ return "n/a"
+ }
+
+ return DateTimeStringFromMillis(i.DeletionPendingExpiry)
 }
 // GetDatabaseWeight returns a value corresponding to the
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/installation_database.go b/vendor/github.com/mattermost/mattermost-cloud/model/installation_database.go
index 06d77002..be5ed20f 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/installation_database.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/installation_database.go
@@ -94,7 +94,6 @@ type InstallationDatabaseStoreInterface interface {
 GetProxyDatabaseResourcesForInstallation(installationID string) (*DatabaseResourceGrouping, error)
 GetOrCreateProxyDatabaseResourcesForInstallation(installationID, multitenantDatabaseID string) (*DatabaseResourceGrouping, error)
 DeleteInstallationProxyDatabaseResources(multitenantDatabase *MultitenantDatabase, databaseSchema *DatabaseSchema) error
- GetGroupDTOs(filter *GroupFilter) ([]*GroupDTO, error)
 }
 // ClusterUtilityDatabaseStoreInterface is the interface necessary for SQLStore
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/installation_dto.go b/vendor/github.com/mattermost/mattermost-cloud/model/installation_dto.go
index df240745..c6610f61 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/installation_dto.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/installation_dto.go
@@ -4,11 +4,6 @@ package model
-import (
- "encoding/json"
- "io"
-)
-
 // InstallationDTO represents a Mattermost installation. DTO stands for Data Transfer Object.
 type InstallationDTO struct {
 *Installation
@@ -17,28 +12,3 @@ type InstallationDTO struct {
 DNS string
 DNSRecords []*InstallationDNS
 }
-
-// InstallationDTOFromReader decodes a json-encoded installation DTO from the given io.Reader.
-func InstallationDTOFromReader(reader io.Reader) (*InstallationDTO, error) {
- installationDTO := InstallationDTO{}
- decoder := json.NewDecoder(reader)
- err := decoder.Decode(&installationDTO)
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- return &installationDTO, nil
-}
-
-// InstallationDTOsFromReader decodes a json-encoded list of installation DTOs from the given io.Reader.
-func InstallationDTOsFromReader(reader io.Reader) ([]*InstallationDTO, error) {
- installationDTOs := []*InstallationDTO{}
- decoder := json.NewDecoder(reader)
-
- err := decoder.Decode(&installationDTOs)
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- return installationDTOs, nil
-}
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/installation_request.go b/vendor/github.com/mattermost/mattermost-cloud/model/installation_request.go
index 0a3a0c64..e394930c 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/installation_request.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/installation_request.go
@@ -10,6 +10,7 @@ import (
 "net/url"
 "regexp"
 "strings"
+ "time"
 "github.com/pkg/errors"
 )
@@ -149,9 +150,9 @@ func (request *CreateInstallationRequest) Validate() error {
 }
 for _, ann := range request.GroupSelectionAnnotations {
- err := validateAnnotationName(ann)
- if err != nil {
- return errors.Wrap(err, "invalid group selection annotation")
+ errInner := validateAnnotationName(ann)
+ if errInner != nil {
+ return errors.Wrap(errInner, "invalid group selection annotation")
 }
 }
@@ -397,6 +398,56 @@ func NewPatchInstallationRequestFromReader(reader io.Reader) (*PatchInstallation
 return &patchInstallationRequest, nil
 }
+// PatchInstallationDeletionRequest specifies the parameters for updating an
+// installation's pending deletion.
+type PatchInstallationDeletionRequest struct {
+ DeletionPendingExpiry *int64
+}
+
+// Validate validates the values of an installation deletion patch request.
+func (p *PatchInstallationDeletionRequest) Validate() error {
+ if p.DeletionPendingExpiry != nil {
+ // DeletionPendingExpiry is the new time when an installation pending
+ // deletion can be deleted. This can be any time from "now" into the
+ // future. The cutoff for "now" will be the current time with a 5 second
+ // buffer. Any time value lower than that will be considered an error.
+ cutoffTimeMillis := GetMillisAtTime(time.Now().Add(-5 * time.Second))
+ if cutoffTimeMillis > *p.DeletionPendingExpiry {
+ return errors.Errorf("DeletionPendingExpiry must be %d or higher", cutoffTimeMillis)
+ }
+ }
+
+ return nil
+}
+
+// Apply applies the deletion patch to the given installation.
+func (p *PatchInstallationDeletionRequest) Apply(installation *Installation) bool {
+ var applied bool
+
+ if p.DeletionPendingExpiry != nil && *p.DeletionPendingExpiry != installation.DeletionPendingExpiry {
+ applied = true
+ installation.DeletionPendingExpiry = *p.DeletionPendingExpiry
+ }
+
+ return applied
+}
+
+// NewPatchInstallationDeletionRequestFromReader will create a PatchInstallationDeletionRequest from an io.Reader with JSON data.
+func NewPatchInstallationDeletionRequestFromReader(reader io.Reader) (*PatchInstallationDeletionRequest, error) { + var patchInstallationDeletionRequest PatchInstallationDeletionRequest + err := json.NewDecoder(reader).Decode(&patchInstallationDeletionRequest) + if err != nil && err != io.EOF { + return nil, errors.Wrap(err, "failed to decode patch installation deletion request") + } + + err = patchInstallationDeletionRequest.Validate() + if err != nil { + return nil, errors.Wrap(err, "invalid patch installation deletion request") + } + + return &patchInstallationDeletionRequest, nil +} + // AssignInstallationGroupRequest specifies request body for installation group assignment. type AssignInstallationGroupRequest struct { GroupSelectionAnnotations []string diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/installation_states.go b/vendor/github.com/mattermost/mattermost-cloud/model/installation_states.go index cd2a0493..4bb3f407 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/installation_states.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/installation_states.go @@ -221,6 +221,9 @@ var ( InstallationStateDeletionPendingRequested, InstallationStateDeletionPendingInProgress, }, + InstallationStateDeletionPending: { + InstallationStateDeletionPending, + }, InstallationStateDeletionCancellationRequested: { InstallationStateDeletionPending, }, diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/kops_metadata.go b/vendor/github.com/mattermost/mattermost-cloud/model/kops_metadata.go index 2bce9d04..facacb9a 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/kops_metadata.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/kops_metadata.go @@ -13,6 +13,10 @@ import ( "github.com/pkg/errors" ) +const ( + ProvisionerKops = "kops" +) + // KopsMetadata is the provisioner metadata stored in a model.Cluster. type KopsMetadata struct { Name string @@ -108,7 +112,7 @@ func (r RotatorConfig) Validate() error { // actionable value. func (km *KopsMetadata) ValidateChangeRequest() error { if km.ChangeRequest == nil { - return errors.New("the KopsMetadata ChangeRequest is nil") + return errors.New("the Kops Metadata ChangeRequest is nil") } if len(km.ChangeRequest.Version) == 0 && @@ -118,7 +122,7 @@ func (km *KopsMetadata) ValidateChangeRequest() error { km.MasterCount == 0 && km.NodeMinCount == 0 && km.NodeMaxCount == 0 { - return errors.New("the KopsMetadata ChangeRequest has no change values set") + return errors.New("the Kops Metadata ChangeRequest has no change values set") } return nil @@ -283,6 +287,105 @@ func (km *KopsMetadata) AddWarning(warning string) { km.Warnings = append(km.Warnings, warning) } +func (km *KopsMetadata) ApplyClusterCreateRequest(createRequest *CreateClusterRequest) bool { + km.ChangeRequest = &KopsMetadataRequestedState{ + Version: createRequest.Version, + AMI: createRequest.AMI, + MasterInstanceType: createRequest.MasterInstanceType, + MasterCount: createRequest.MasterCount, + NodeInstanceType: createRequest.NodeInstanceType, + NodeMinCount: createRequest.NodeMinCount, + NodeMaxCount: createRequest.NodeMaxCount, + MaxPodsPerNode: createRequest.MaxPodsPerNode, + Networking: createRequest.Networking, + VPC: createRequest.VPC, + } + return true +} + +// ApplyUpgradePatch applies the patch to the given cluster's metadata. 
+func (km *KopsMetadata) ApplyUpgradePatch(patchRequest *PatchUpgradeClusterRequest) bool { + changes := &KopsMetadataRequestedState{} + + var applied bool + if patchRequest.Version != nil && *patchRequest.Version != km.Version { + applied = true + changes.Version = *patchRequest.Version + } + if patchRequest.AMI != nil && *patchRequest.AMI != km.AMI { + applied = true + changes.AMI = *patchRequest.AMI + } + if patchRequest.MaxPodsPerNode != nil && *patchRequest.MaxPodsPerNode != km.MaxPodsPerNode { + applied = true + changes.MaxPodsPerNode = *patchRequest.MaxPodsPerNode + } + + if km.RotatorRequest == nil { + km.RotatorRequest = &RotatorMetadata{} + } + + if applied { + km.ChangeRequest = changes + km.RotatorRequest.Config = patchRequest.RotatorConfig + } + + return applied +} + +func (km *KopsMetadata) GetCommonMetadata() ProvisionerMetadata { + return ProvisionerMetadata{ + Name: km.Name, + Version: km.Version, + AMI: km.AMI, + NodeInstanceType: km.NodeInstanceType, + NodeMinCount: km.NodeMinCount, + NodeMaxCount: km.NodeMaxCount, + MaxPodsPerNode: km.MaxPodsPerNode, + VPC: km.VPC, + Networking: km.Networking, + } +} + +func (em *KopsMetadata) ValidateClusterSizePatch(patchRequest *PatchClusterSizeRequest) error { + // One more check that can't be done without both the request and the cluster. + if patchRequest.NodeMinCount == nil && patchRequest.NodeMaxCount != nil && + *patchRequest.NodeMaxCount < em.NodeMinCount { + return errors.New("resize patch would set max node count lower than min node count") + } + + return nil +} + +func (km *KopsMetadata) ApplyClusterSizePatch(patchRequest *PatchClusterSizeRequest) bool { + changes := &KopsMetadataRequestedState{} + + var applied bool + if patchRequest.NodeInstanceType != nil && *patchRequest.NodeInstanceType != km.NodeInstanceType { + applied = true + changes.NodeInstanceType = *patchRequest.NodeInstanceType + } + if patchRequest.NodeMinCount != nil && *patchRequest.NodeMinCount != km.NodeMinCount { + applied = true + changes.NodeMinCount = *patchRequest.NodeMinCount + } + if patchRequest.NodeMaxCount != nil && *patchRequest.NodeMaxCount != km.NodeMaxCount { + applied = true + changes.NodeMaxCount = *patchRequest.NodeMaxCount + } + + if km.RotatorRequest == nil { + km.RotatorRequest = &RotatorMetadata{} + } + + if applied { + km.ChangeRequest = changes + km.RotatorRequest.Config = patchRequest.RotatorConfig + } + + return applied +} + func (igm *KopsInstanceGroupsMetadata) getStableIterationOrder() []string { keys := make([]string, 0, len(*igm)) for k := range *igm { diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/multitenant_database.go b/vendor/github.com/mattermost/mattermost-cloud/model/multitenant_database.go index 07452111..1dd3b7d6 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/multitenant_database.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/multitenant_database.go @@ -55,7 +55,7 @@ type MultitenantDatabase struct { // CreationDateString returns a standardized date string for a multitenant // database string. func (d *MultitenantDatabase) CreationDateString() string { - return GetDateString(d.CreateAt) + return DateStringFromMillis(d.CreateAt) } // LogicalDatabase represents a logical database inside a MultitenantDatabase. 
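Note the pattern in kops_metadata.go above: the Apply/Validate helpers deleted from the request types in cluster_request.go are reimplemented on KopsMetadata, mirroring the EKSMetadata methods earlier in this diff, so both provisioners expose the same patch surface plus GetCommonMetadata. A sketch of the dispatch this enables; the Cluster field names (Provisioner, ProvisionerMetadataKops, ProvisionerMetadataEKS) are assumed from the surrounding code rather than shown in this diff:

package dispatch

import "github.com/mattermost/mattermost-cloud/model"

// applyUpgrade routes an upgrade patch to the metadata matching the
// cluster's provisioner and reports whether any change was recorded.
func applyUpgrade(cluster *model.Cluster, patch *model.PatchUpgradeClusterRequest) bool {
	if cluster.Provisioner == model.ProvisionerEKS {
		return cluster.ProvisionerMetadataEKS.ApplyUpgradePatch(patch)
	}
	return cluster.ProvisionerMetadataKops.ApplyUpgradePatch(patch)
}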
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/pgbouncer.go b/vendor/github.com/mattermost/mattermost-cloud/model/pgbouncer.go
new file mode 100644
index 00000000..fcb6cfd6
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/pgbouncer.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+//
+
+package model
+
+import "github.com/pkg/errors"
+
+// PGBouncerConfig contains the configuration for the PGBouncer utility.
+// //////////////////////////////////////////////////////////////////////////////
+// - MaxDatabaseConnectionsPerPool is the maximum number of connections per
+// logical database pool when using proxy databases.
+// - MinPoolSize is the minimum pool size.
+// - DefaultPoolSize is the default pool size per user.
+// - ReservePoolSize is how many additional connections to allow per pool.
+// - MaxClientConnections is the maximum client connections.
+// - ServerIdleTimeout is the server idle timeout.
+// - ServerLifetime is the server lifetime.
+// - ServerResetQueryAlways is 0 or 1, controlling whether server_reset_query
+// should be run in all pooling modes.
+//
+// //////////////////////////////////////////////////////////////////////////////
+type PGBouncerConfig struct {
+ MinPoolSize int
+ DefaultPoolSize int
+ ReservePoolSize int
+ MaxClientConnections int
+ MaxDatabaseConnectionsPerPool int
+ ServerIdleTimeout int
+ ServerLifetime int
+ ServerResetQueryAlways int
+}
+
+// Validate validates a PGBouncerConfig.
+func (c *PGBouncerConfig) Validate() error {
+ if c.MaxDatabaseConnectionsPerPool < 1 {
+ return errors.New("MaxDatabaseConnectionsPerPool must be 1 or greater")
+ }
+ if c.DefaultPoolSize < 1 {
+ return errors.New("DefaultPoolSize must be 1 or greater")
+ }
+ if c.ServerResetQueryAlways != 0 && c.ServerResetQueryAlways != 1 {
+ return errors.New("ServerResetQueryAlways must be 0 or 1")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/provisioner_metadata.go b/vendor/github.com/mattermost/mattermost-cloud/model/provisioner_metadata.go
new file mode 100644
index 00000000..7a7665b0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/provisioner_metadata.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+//
+
+package model
+
+type ProvisionerMetadata struct {
+ Name string
+ Version string
+ AMI string
+ NodeInstanceType string
+ NodeMinCount int64
+ NodeMaxCount int64
+ MaxPodsPerNode int64
+ VPC string
+ Networking string
+}
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/time.go b/vendor/github.com/mattermost/mattermost-cloud/model/time.go
index 87d834e3..6520301e 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/time.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/time.go
@@ -8,7 +8,12 @@ import "time"
 // GetMillis is a convenience method to get milliseconds since epoch.
 func GetMillis() int64 {
- return time.Now().UnixNano() / int64(time.Millisecond)
+ return GetMillisAtTime(time.Now())
+}
+
+// GetMillisAtTime returns millis for a given time.
+func GetMillisAtTime(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
 }
 // TimeFromMillis converts time in milliseconds to time.Time.
@@ -16,11 +21,16 @@ func TimeFromMillis(millis int64) time.Time {
 return time.Unix(0, millis*int64(time.Millisecond))
 }
-// GetDateString returns a standard date string from millis.
-func GetDateString(millis int64) string {
+// DateStringFromMillis returns a standard date string from millis.
+func DateStringFromMillis(millis int64) string {
 return TimeFromMillis(millis).Format("Jan 2 2006")
 }
+// DateTimeStringFromMillis returns a standard complete time string from millis.
+func DateTimeStringFromMillis(millis int64) string {
+ return TimeFromMillis(millis).Format("2006-01-02 15:04:05 MST")
+}
+
 // ElapsedTimeInSeconds returns time in seconds since the provided millis.
 func ElapsedTimeInSeconds(millis int64) float64 {
 return float64(GetMillis()-millis) / 1000
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/types.go b/vendor/github.com/mattermost/mattermost-cloud/model/types.go
new file mode 100644
index 00000000..6cc6736e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/types.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+//
+
+package model
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "os"
+)
+
+type WebhookHeader struct {
+ Key string `json:"key"`
+ Value *string `json:"value,omitempty"`
+ ValueFromEnv *string `json:"value_from_env,omitempty"`
+}
+
+type Headers []WebhookHeader
+
+func (wh Headers) Value() (driver.Value, error) {
+ return json.Marshal(wh)
+}
+
+func (wh *Headers) Scan(databaseValue interface{}) error {
+ switch value := databaseValue.(type) {
+ case string: // sqlite's text
+ return json.Unmarshal([]byte(value), wh)
+ case []byte: // psql's jsonb
+ return json.Unmarshal(value, wh)
+ case nil:
+ return nil
+ default:
+ return fmt.Errorf("cannot scan type %T into Headers", databaseValue)
+ }
+}
+
+func (wh Headers) Validate() error {
+ keys := make(map[string]struct{}, len(wh))
+ for _, header := range wh {
+ if _, ok := keys[header.Key]; ok {
+ return fmt.Errorf("header %s is duplicated", header.Key)
+ }
+ keys[header.Key] = struct{}{}
+ if header.Value == nil && header.ValueFromEnv == nil {
+ return fmt.Errorf("header %s must have either a value or a value_from_env", header.Key)
+ }
+ if header.Value != nil && header.ValueFromEnv != nil {
+ return fmt.Errorf("header %s cannot have both a value and a value_from_env", header.Key)
+ }
+ }
+ return nil
+}
+
+func (wh Headers) GetHeaders() map[string]string {
+ headers := make(map[string]string, len(wh))
+ for _, header := range wh {
+ if header.Value != nil {
+ headers[header.Key] = *header.Value
+ } else if header.ValueFromEnv != nil {
+ headers[header.Key] = os.Getenv(*header.ValueFromEnv)
+ }
+ }
+ return headers
+}
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/webhook.go b/vendor/github.com/mattermost/mattermost-cloud/model/webhook.go
index af21c509..7efd4ff2 100644
--- a/vendor/github.com/mattermost/mattermost-cloud/model/webhook.go
+++ b/vendor/github.com/mattermost/mattermost-cloud/model/webhook.go
@@ -40,6 +40,7 @@ type Webhook struct {
 URL string
 CreateAt int64
 DeleteAt int64
+ Headers Headers
 }
 // WebhookFilter describes the parameters used to constrain a set of webhooks.
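Headers implements driver.Valuer and sql.Scanner (types.go above), so it round-trips through a JSON column: Value marshals to JSON, and Scan accepts psql's []byte jsonb or sqlite's string text. A self-contained illustration of the round-trip without a database:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-cloud/model"
)

func main() {
	v := "42"
	in := model.Headers{{Key: "X-Build", Value: &v}}

	raw, err := in.Value() // JSON bytes as they would be stored
	if err != nil {
		panic(err)
	}

	var out model.Headers
	if err := out.Scan(raw); err != nil { // restore from []byte
		panic(err)
	}
	fmt.Println(out.GetHeaders()["X-Build"]) // prints 42
}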
diff --git a/vendor/github.com/mattermost/mattermost-cloud/model/webhook_request.go b/vendor/github.com/mattermost/mattermost-cloud/model/webhook_request.go index e1aaf222..8375a3ea 100644 --- a/vendor/github.com/mattermost/mattermost-cloud/model/webhook_request.go +++ b/vendor/github.com/mattermost/mattermost-cloud/model/webhook_request.go @@ -17,6 +17,7 @@ import ( type CreateWebhookRequest struct { OwnerID string URL string + Headers Headers } // NewCreateWebhookRequestFromReader will create a CreateWebhookRequest from an io.Reader with JSON data. diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_sizes.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_sizes.go index 7b34b1fb..2bbb861b 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_sizes.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_sizes.go @@ -288,8 +288,8 @@ var sizeMiniSingleton = ClusterInstallationSize{ Replicas: 1, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("150Mi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2000m"), @@ -301,8 +301,8 @@ var sizeMiniSingleton = ClusterInstallationSize{ Replicas: 1, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("512Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), }, }, }, @@ -325,8 +325,8 @@ var sizeMiniHA = ClusterInstallationSize{ Replicas: 2, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("512Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("150Mi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("2000m"), @@ -338,8 +338,8 @@ var sizeMiniHA = ClusterInstallationSize{ Replicas: 4, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("200m"), - corev1.ResourceMemory: resource.MustParse("512Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), }, }, }, diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_utils.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_utils.go index 2fee52b9..2ccc8c0c 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_utils.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1/clusterinstallation_utils.go @@ -15,7 +15,7 @@ const ( // DefaultMattermostImage is the default Mattermost docker image DefaultMattermostImage = "mattermost/mattermost-enterprise-edition" // DefaultMattermostVersion is the default Mattermost docker tag - DefaultMattermostVersion = "6.5.0" + DefaultMattermostVersion = "7.5.1" // DefaultMattermostSize is the default number of users DefaultMattermostSize = "5000users" // DefaultMattermostDatabaseType is the default 
Mattermost database diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/file_store_util.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/file_store_util.go index a5af584e..7299c4b9 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/file_store_util.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/file_store_util.go @@ -12,7 +12,7 @@ import ( // SetDefaults sets the missing values in FileStore to the default ones. func (fs *FileStore) SetDefaults() { - if fs.IsExternal() { + if fs.isAnyExceptOperatorManaged() { return } @@ -25,6 +25,24 @@ func (fs *FileStore) IsExternal() bool { return fs.External != nil && fs.External.URL != "" } +// IsExternalVolume returns true if the filestore requested is an externally +// managed volume. +func (fs *FileStore) IsExternalVolume() bool { + return fs.ExternalVolume != nil && fs.ExternalVolume.VolumeClaimName != "" +} + +// IsLocal returns true if the filestore requested is local (PVC backed). +func (fs *FileStore) IsLocal() bool { + return fs.Local != nil && fs.Local.Enabled +} + +// isAnyExceptOperatorManaged checks if any filestore types are configured +// except the operator managed type. This is generally used to see if defaults +// should be applied. +func (fs *FileStore) isAnyExceptOperatorManaged() bool { + return fs.IsExternal() || fs.IsExternalVolume() || fs.IsLocal() +} + func (fs *FileStore) ensureDefault() { if fs.OperatorManaged == nil { fs.OperatorManaged = &OperatorManagedMinio{} @@ -39,7 +57,7 @@ func (omm *OperatorManagedMinio) SetDefaults() { } func (fs *FileStore) SetDefaultReplicasAndResources() { - if fs.IsExternal() { + if fs.isAnyExceptOperatorManaged() { return } fs.ensureDefault() @@ -56,7 +74,7 @@ func (omm *OperatorManagedMinio) SetDefaultReplicasAndResources() { } func (fs *FileStore) OverrideReplicasAndResourcesFromSize(size mattermostv1alpha1.ClusterInstallationSize) { - if fs.IsExternal() { + if fs.isAnyExceptOperatorManaged() { return } fs.ensureDefault()
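Usage sketch (not part of the diff): with the change above, SetDefaults and the other default-setting helpers back off whenever any filestore other than the operator-managed Minio is configured, so an externally managed volume is no longer overwritten with Minio defaults. Assuming only the vendored package path below:

package main

import (
    "fmt"

    mmv1beta1 "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1"
)

func main() {
    fs := mmv1beta1.FileStore{
        ExternalVolume: &mmv1beta1.ExternalVolumeFileStore{
            VolumeClaimName: "existing-claim", // hypothetical PVC name
        },
    }

    // isAnyExceptOperatorManaged is true here, so SetDefaults returns early.
    fs.SetDefaults()

    fmt.Println(fs.IsExternalVolume())     // true
    fmt.Println(fs.IsLocal())              // false
    fmt.Println(fs.OperatorManaged == nil) // true: Minio defaults were not injected
}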
diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_types.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_types.go index e2165868..b7bbfe08 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_types.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_types.go @@ -75,6 +75,9 @@ type MattermostSpec struct { // Ingress defines configuration for Ingress resource created by the Operator. // +optional Ingress *Ingress `json:"ingress,omitempty"` + + // +optional + AWSLoadBalancerController *AWSLoadBalancerController `json:"awsLoadBalancerController,omitempty"` // Volumes allows for mounting volumes from various sources into the // Mattermost application pods. // +optional @@ -117,6 +120,10 @@ type MattermostSpec struct { // +optional PodTemplate *PodTemplate `json:"podTemplate,omitempty"` + // DeploymentTemplate defines configuration for the template for Mattermost deployment. + // +optional + DeploymentTemplate *DeploymentTemplate `json:"deploymentTemplate,omitempty"` + // UpdateJob defines configuration for the template for the update job. // +optional UpdateJob *UpdateJob `json:"updateJob,omitempty"` @@ -177,6 +184,32 @@ type Ingress struct { IngressClass *string `json:"ingressClass,omitempty"` } +type AWSLoadBalancerController struct { + // An AWS ALB Ingress will be created instead of nginx + // +optional + Enabled bool `json:"enabled,omitempty"` + + // Certificate ARN for the ALB; required if SSL is enabled + // +optional + CertificateARN string `json:"certificateARN,omitempty"` + + // Whether the Ingress will be internet-facing; default is false + // +optional + InternetFacing bool `json:"internetFacing,omitempty"` + + // Hosts allows specifying additional domain names for Mattermost to use. + // +optional + Hosts []IngressHost `json:"hosts,omitempty"` + + // IngressClassName for your ingress + // +optional + IngressClassName string `json:"ingressClassName,omitempty"` + + // Annotations defines annotations passed to the Ingress associated with Mattermost. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + // IngressHost specifies additional hosts configuration. type IngressHost struct { HostName string `json:"hostName,omitempty"` @@ -230,6 +263,13 @@ type PodTemplate struct { ExtraLabels map[string]string `json:"extraLabels,omitempty"` } +// DeploymentTemplate defines configuration for the template for Mattermost deployment. +type DeploymentTemplate struct { + // Defines the revision history limit for the mattermost deployment. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` +} + // UpdateJob defines configuration for the template for the update job pod. type UpdateJob struct { // Determines whether to disable the Operator's creation of the update job. @@ -320,9 +360,15 @@ type FileStore struct { // Defines the configuration of an external file store. // +optional External *ExternalFileStore `json:"external,omitempty"` + // Defines the configuration of externally managed PVC backed storage. + // +optional + ExternalVolume *ExternalVolumeFileStore `json:"externalVolume,omitempty"` // Defines the configuration of file store managed by Kubernetes operator. // +optional OperatorManaged *OperatorManagedMinio `json:"operatorManaged,omitempty"` + // Defines the configuration of PVC backed storage (local). This is NOT recommended for production environments. + // +optional + Local *LocalFileStore `json:"local,omitempty"` } // ExternalFileStore defines the configuration of the external file store that should be used by Mattermost. @@ -334,6 +380,16 @@ type ExternalFileStore struct { // Optionally enter the name of already existing secret. // Secret should have two values: "accesskey" and "secretkey". Secret string `json:"secret,omitempty"` + + // Optionally use a service account with an IAM role to access AWS services, like S3. + UseServiceAccount bool `json:"useServiceAccount,omitempty"` +} + +// ExternalVolumeFileStore defines the configuration of an externally managed +// volume file store. +type ExternalVolumeFileStore struct { + // The name of the matching volume claim for the externally managed volume. + VolumeClaimName string `json:"volumeClaimName,omitempty"` } // OperatorManagedMinio defines the configuration of a Minio file store managed by Kubernetes Operator. @@ -355,6 +411,16 @@ type OperatorManagedMinio struct { Resources v1.ResourceRequirements `json:"resources,omitempty"` } +// LocalFileStore defines the configuration of the local file store that should be used by Mattermost (PVC configuration). +type LocalFileStore struct { + // Set to use local (PVC) storage; explicit enablement is required to prevent accidental misconfiguration. + Enabled bool `json:"enabled"` + // Defines the storage size for the PVC. (default 50Gi) + // +optional + // +kubebuilder:validation:Pattern=^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + StorageSize string `json:"storageSize,omitempty"` +} + // ElasticSearch defines the ElasticSearch configuration for Mattermost. type ElasticSearch struct { Host string `json:"host,omitempty"`
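Usage sketch (not part of the diff): the new spec fields compose as below. The ARN and host are placeholders, and the FileStore field on MattermostSpec is assumed from the existing API (it is not shown in this hunk):

package main

import (
    "fmt"

    mmv1beta1 "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1"
)

func main() {
    revisionHistoryLimit := int32(3)
    spec := mmv1beta1.MattermostSpec{
        AWSLoadBalancerController: &mmv1beta1.AWSLoadBalancerController{
            Enabled:        true,
            InternetFacing: true,
            CertificateARN: "arn:aws:acm:us-east-1:123456789012:certificate/placeholder",
            Hosts:          []mmv1beta1.IngressHost{{HostName: "mm.example.com"}},
        },
        FileStore: mmv1beta1.FileStore{
            Local: &mmv1beta1.LocalFileStore{Enabled: true, StorageSize: "50Gi"},
        },
        DeploymentTemplate: &mmv1beta1.DeploymentTemplate{
            RevisionHistoryLimit: &revisionHistoryLimit,
        },
    }

    fmt.Println(len(spec.AWSLoadBalancerController.Hosts)) // 1
}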
diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_utils.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_utils.go index 3db271e8..8ee8610c 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_utils.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/mattermost_utils.go @@ -19,17 +19,19 @@ const ( // DefaultMattermostImage is the default Mattermost docker image DefaultMattermostImage = "mattermost/mattermost-enterprise-edition" // DefaultMattermostVersion is the default Mattermost docker tag - DefaultMattermostVersion = "6.5.0" + DefaultMattermostVersion = "7.5.1" // DefaultMattermostSize is the default number of users DefaultMattermostSize = "5000users" // DefaultMattermostDatabaseType is the default Mattermost database DefaultMattermostDatabaseType = "mysql" - // DefaultFilestoreStorageSize is the default Storage size for Minio + // DefaultFilestoreStorageSize is the default Storage size for Minio or Local Storage DefaultFilestoreStorageSize = "50Gi" // DefaultStorageSize is the default Storage size for the Database DefaultStorageSize = "50Gi" // DefaultPullPolicy is the default Pull Policy used by Mattermost app container DefaultPullPolicy = corev1.PullIfNotPresent + // DefaultLocalFilePath is the default file path used with local (PVC) storage + DefaultLocalFilePath = "/mattermost/data" // ClusterLabel is the label applied across all components ClusterLabel = "installation.mattermost.com/installation" @@ -43,9 +45,13 @@ const ( MattermostAppContainerName = "mattermost" ) -// SetDefaults set the missing values in the manifest to the default ones +// SetDefaults sets the missing values in the manifest to the default ones func (mm *Mattermost) SetDefaults() error { - if mm.IngressEnabled() && mm.GetIngressHost() == "" { + if mm.AWSLoadBalancerEnabled() && len(mm.Spec.AWSLoadBalancerController.Hosts) == 0 { + return errors.New("awsLoadBalancerController.hosts is required, but not set") + } + + if !mm.AWSLoadBalancerEnabled() && mm.IngressEnabled() && mm.GetIngressHost() == "" { return errors.New("ingress.host required, but not set") } if mm.Spec.Image == "" { @@ -72,6 +78,13 @@ func (mm *Mattermost) IngressEnabled() bool { return true } +func (mm *Mattermost) AWSLoadBalancerEnabled() bool { + if mm.Spec.AWSLoadBalancerController != nil { + return mm.Spec.AWSLoadBalancerController.Enabled + } + return false +} + // GetIngressHost returns Mattermost primary Ingress host. func (mm *Mattermost) GetIngressHost() string { if mm.Spec.Ingress == nil { @@ -111,6 +124,18 @@ func (mm *Mattermost) GetIngressHostNames() []string { return hosts } +func (mm *Mattermost) GetAWSLoadBalancerHostNames() []string { + hosts := []string{} + + if mm.Spec.AWSLoadBalancerController != nil { + for _, host := range mm.Spec.AWSLoadBalancerController.Hosts { + hosts = append(hosts, host.HostName) + } + } + + return hosts +} + // GetIngresAnnotations returns Mattermost Ingress annotations. func (mm *Mattermost) GetIngresAnnotations() map[string]string { if mm.Spec.Ingress == nil { @@ -119,6 +144,11 @@ func (mm *Mattermost) GetIngresAnnotations() map[string]string { return mm.Spec.Ingress.Annotations } +// GetAWSLoadBalancerIngressAnnotations returns Mattermost AWS load balancer Ingress annotations. +func (mm *Mattermost) GetAWSLoadBalancerIngressAnnotations() map[string]string { + return mm.Spec.AWSLoadBalancerController.Annotations +} + // GetIngressTLSSecret returns Mattermost Ingress TLS secret. func (mm *Mattermost) GetIngressTLSSecret() string { if mm.Spec.Ingress != nil {
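Usage sketch (not part of the diff): SetDefaults now fails fast when the ALB controller is enabled without hosts, and the ingress.host requirement is skipped entirely in ALB mode:

package main

import (
    "fmt"

    mmv1beta1 "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1"
)

func main() {
    mm := &mmv1beta1.Mattermost{}
    mm.Spec.AWSLoadBalancerController = &mmv1beta1.AWSLoadBalancerController{Enabled: true}

    fmt.Println(mm.AWSLoadBalancerEnabled()) // true

    // No Hosts configured, so SetDefaults returns the new validation error.
    if err := mm.SetDefaults(); err != nil {
        fmt.Println(err) // awsLoadBalancerController.hosts is required, but not set
    }
}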
diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.deepcopy.go index 556959d7..ceecadc7 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.deepcopy.go @@ -13,6 +13,33 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSLoadBalancerController) DeepCopyInto(out *AWSLoadBalancerController) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]IngressHost, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerController. +func (in *AWSLoadBalancerController) DeepCopy() *AWSLoadBalancerController { + if in == nil { + return nil + } + out := new(AWSLoadBalancerController) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Database) DeepCopyInto(out *Database) { *out = *in @@ -38,6 +65,26 @@ func (in *Database) DeepCopy() *Database { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTemplate) DeepCopyInto(out *DeploymentTemplate) { + *out = *in + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTemplate. +func (in *DeploymentTemplate) DeepCopy() *DeploymentTemplate { + if in == nil { + return nil + } + out := new(DeploymentTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticSearch) DeepCopyInto(out *ElasticSearch) { *out = *in @@ -83,6 +130,21 @@ func (in *ExternalFileStore) DeepCopy() *ExternalFileStore { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalVolumeFileStore) DeepCopyInto(out *ExternalVolumeFileStore) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalVolumeFileStore. +func (in *ExternalVolumeFileStore) DeepCopy() *ExternalVolumeFileStore { + if in == nil { + return nil + } + out := new(ExternalVolumeFileStore) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FileStore) DeepCopyInto(out *FileStore) { *out = *in @@ -91,11 +153,21 @@ func (in *FileStore) DeepCopyInto(out *FileStore) { *out = new(ExternalFileStore) **out = **in } + if in.ExternalVolume != nil { + in, out := &in.ExternalVolume, &out.ExternalVolume + *out = new(ExternalVolumeFileStore) + **out = **in + } if in.OperatorManaged != nil { in, out := &in.OperatorManaged, &out.OperatorManaged *out = new(OperatorManagedMinio) (*in).DeepCopyInto(*out) } + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalFileStore) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileStore. @@ -155,6 +227,21 @@ func (in *IngressHost) DeepCopy() *IngressHost { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalFileStore) DeepCopyInto(out *LocalFileStore) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalFileStore. +func (in *LocalFileStore) DeepCopy() *LocalFileStore { + if in == nil { + return nil + } + out := new(LocalFileStore) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Mattermost) DeepCopyInto(out *Mattermost) { *out = *in @@ -255,6 +342,11 @@ func (in *MattermostSpec) DeepCopyInto(out *MattermostSpec) { *out = new(Ingress) (*in).DeepCopyInto(*out) } + if in.AWSLoadBalancerController != nil { + in, out := &in.AWSLoadBalancerController, &out.AWSLoadBalancerController + *out = new(AWSLoadBalancerController) + (*in).DeepCopyInto(*out) + } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes *out = make([]v1.Volume, len(*in)) @@ -289,6 +381,11 @@ func (in *MattermostSpec) DeepCopyInto(out *MattermostSpec) { *out = new(PodTemplate) (*in).DeepCopyInto(*out) } + if in.DeploymentTemplate != nil { + in, out := &in.DeploymentTemplate, &out.DeploymentTemplate + *out = new(DeploymentTemplate) + (*in).DeepCopyInto(*out) + } if in.UpdateJob != nil { in, out := &in.UpdateJob, &out.UpdateJob *out = new(UpdateJob) diff --git a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.openapi.go b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.openapi.go index 917aaad8..492caf11 100644 --- a/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.openapi.go +++ b/vendor/github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1/zz_generated.openapi.go @@ -198,6 +198,11 @@ func schema_mattermost_operator_apis_mattermost_v1beta1_MattermostSpec(ref commo Ref: ref("github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Ingress"), }, }, + "awsLoadBalancerController": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.AWSLoadBalancerController"), + }, + }, "volumes": { SchemaProps: spec.SchemaProps{ Description: "Volumes allows for mounting volumes from various sources into the Mattermost application pods.", @@ -299,6 +304,12 @@ func schema_mattermost_operator_apis_mattermost_v1beta1_MattermostSpec(ref commo Ref: ref("github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.PodTemplate"), }, }, + "deploymentTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "DeploymentTemplate defines configuration for the template for Mattermost deployment.", + Ref: ref("github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.DeploymentTemplate"), + }, + }, "updateJob": { SchemaProps: spec.SchemaProps{ Description: "UpdateJob defines configuration for the template for the update job.", @@ -322,6 +333,6 @@ func schema_mattermost_operator_apis_mattermost_v1beta1_MattermostSpec(ref commo }, }, Dependencies: []string{ - "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Database", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.ElasticSearch", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.FileStore", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Ingress", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.PodExtensions", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.PodTemplate", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Probes", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.ResourcePatch", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Scheduling", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.UpdateJob", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + 
"github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.AWSLoadBalancerController", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Database", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.DeploymentTemplate", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.ElasticSearch", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.FileStore", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Ingress", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.PodExtensions", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.PodTemplate", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Probes", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.ResourcePatch", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.Scheduling", "github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1.UpdateJob", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, } } diff --git a/vendor/github.com/mattermost/mattermost-operator/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/mattermost/mattermost-operator/pkg/client/clientset/versioned/scheme/register.go index d0725102..ab555ba2 100644 --- a/vendor/github.com/mattermost/mattermost-operator/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/mattermost/mattermost-operator/pkg/client/clientset/versioned/scheme/register.go @@ -24,14 +24,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/scheme/register.go b/vendor/github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/scheme/register.go index e8bab8c3..0a2c4236 100644 --- a/vendor/github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/scheme/register.go +++ b/vendor/github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/scheme/register.go @@ -24,14 +24,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go deleted file mode 100644 index 3b3829fd..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/mattermost/logr" -) - -// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation -// before mlog is fully configured. -func defaultLog(level, msg string, fields ...Field) { - log := struct { - Level string `json:"level"` - Message string `json:"msg"` - Fields []Field `json:"fields,omitempty"` - }{ - level, - msg, - fields, - } - - if b, err := json.Marshal(log); err != nil { - fmt.Fprintf(os.Stderr, `{"level":"error","msg":"failed to encode log message"}%s`, "\n") - } else { - fmt.Fprintf(os.Stderr, "%s\n", b) - } -} - -func defaultDebugLog(msg string, fields ...Field) { - defaultLog("debug", msg, fields...) -} - -func defaultInfoLog(msg string, fields ...Field) { - defaultLog("info", msg, fields...) -} - -func defaultWarnLog(msg string, fields ...Field) { - defaultLog("warn", msg, fields...) -} - -func defaultErrorLog(msg string, fields ...Field) { - defaultLog("error", msg, fields...) -} - -func defaultCriticalLog(msg string, fields ...Field) { - // We map critical to error in zap, so be consistent. - defaultLog("error", msg, fields...) -} - -func defaultCustomLog(lvl LogLevel, msg string, fields ...Field) { - // custom log levels are only output once log targets are configured. -} - -func defaultCustomMultiLog(lvl []LogLevel, msg string, fields ...Field) { - // custom log levels are only output once log targets are configured. -} - -func defaultFlush(ctx context.Context) error { - return nil -} - -func defaultAdvancedConfig(cfg LogTargetCfg) error { - // mlog.ConfigAdvancedConfig should not be called until default - // logger is replaced with mlog.Logger instance. - return errors.New("cannot config advanced logging on default logger") -} - -func defaultAdvancedShutdown(ctx context.Context) error { - return nil -} - -func defaultAddTarget(target logr.Target) error { - // mlog.AddTarget should not be called until default - // logger is replaced with mlog.Logger instance. 
- return errors.New("cannot AddTarget on default logger") -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go deleted file mode 100644 index f8d58968..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import "github.com/mattermost/logr" - -// onLoggerError is called when the logging system encounters an error, -// such as a target not able to write records. The targets will keep trying -// however the error will be logged with a dedicated level that can be output -// to a safe/always available target for monitoring or alerting. -func onLoggerError(err error) { - Log(LvlLogError, "advanced logging error", Err(err)) -} - -// onQueueFull is called when the main logger queue is full, indicating the -// volume and frequency of log record creation is too high for the queue size -// and/or the target latencies. -func onQueueFull(rec *logr.LogRec, maxQueueSize int) bool { - Log(LvlLogError, "main queue full, dropping record", Any("rec", rec)) - return true // drop record -} - -// onTargetQueueFull is called when the main logger queue is full, indicating the -// volume and frequency of log record creation is too high for the target's queue size -// and/or the target latency. -func onTargetQueueFull(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool { - Log(LvlLogError, "target queue full, dropping record", String("target", ""), Any("rec", rec)) - return true // drop record -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go deleted file mode 100644 index 5eec280b..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import ( - "context" - - "github.com/mattermost/logr" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var globalLogger *Logger - -func InitGlobalLogger(logger *Logger) { - // Clean up previous instance. 
- if globalLogger != nil && globalLogger.logrLogger != nil { - globalLogger.logrLogger.Logr().Shutdown() - } - glob := *logger - glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1)) - globalLogger = &glob - Debug = globalLogger.Debug - Info = globalLogger.Info - Warn = globalLogger.Warn - Error = globalLogger.Error - Critical = globalLogger.Critical - Log = globalLogger.Log - LogM = globalLogger.LogM - Flush = globalLogger.Flush - ConfigAdvancedLogging = globalLogger.ConfigAdvancedLogging - ShutdownAdvancedLogging = globalLogger.ShutdownAdvancedLogging - AddTarget = globalLogger.AddTarget -} - -func RedirectStdLog(logger *Logger) { - zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel) -} - -type LogFunc func(string, ...Field) -type LogFuncCustom func(LogLevel, string, ...Field) -type LogFuncCustomMulti func([]LogLevel, string, ...Field) -type FlushFunc func(context.Context) error -type ConfigFunc func(cfg LogTargetCfg) error -type ShutdownFunc func(context.Context) error -type AddTargetFunc func(logr.Target) error - -// DON'T USE THIS Modify the level on the app logger -func GloballyDisableDebugLogForTest() { - globalLogger.consoleLevel.SetLevel(zapcore.ErrorLevel) -} - -// DON'T USE THIS Modify the level on the app logger -func GloballyEnableDebugLogForTest() { - globalLogger.consoleLevel.SetLevel(zapcore.DebugLevel) -} - -var Debug LogFunc = defaultDebugLog -var Info LogFunc = defaultInfoLog -var Warn LogFunc = defaultWarnLog -var Error LogFunc = defaultErrorLog -var Critical LogFunc = defaultCriticalLog -var Log LogFuncCustom = defaultCustomLog -var LogM LogFuncCustomMulti = defaultCustomMultiLog -var Flush FlushFunc = defaultFlush - -var ConfigAdvancedLogging ConfigFunc = defaultAdvancedConfig -var ShutdownAdvancedLogging ShutdownFunc = defaultAdvancedShutdown -var AddTarget AddTargetFunc = defaultAddTarget diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go deleted file mode 100644 index d6001da0..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -// Standard levels -var ( - LvlPanic = LogLevel{ID: 0, Name: "panic"} - LvlFatal = LogLevel{ID: 1, Name: "fatal"} - LvlError = LogLevel{ID: 2, Name: "error"} - LvlWarn = LogLevel{ID: 3, Name: "warn"} - LvlInfo = LogLevel{ID: 4, Name: "info"} - LvlDebug = LogLevel{ID: 5, Name: "debug"} - LvlTrace = LogLevel{ID: 6, Name: "trace"} - // used only by the logger - LvlLogError = LogLevel{ID: 11, Name: "logerror"} -) - -// Register custom (discrete) levels here... -// ! ID's must not exceed 32,768 ! -var ( - // used by the audit system - LvlAuditDebug = LogLevel{ID: 100, Name: "AuditDebug"} - LvlAuditError = LogLevel{ID: 101, Name: "AuditError"} - // used by the TCP log target - LvlTcpLogTarget = LogLevel{ID: 105, Name: "TcpLogTarget"} -) - -// Combinations for LogM (log multi) -var ( - MLvlExample = []LogLevel{LvlAuditDebug, LvlDebug} -) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go deleted file mode 100644 index f2e99a12..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
-// See LICENSE.txt for license information. - -package mlog - -import ( - "context" - "io" - "log" - "os" - - "github.com/mattermost/logr" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "gopkg.in/natefinch/lumberjack.v2" -) - -const ( - // Very verbose messages for debugging specific issues - LevelDebug = "debug" - // Default log level, informational - LevelInfo = "info" - // Warnings are messages about possible issues - LevelWarn = "warn" - // Errors are messages about things we know are problems - LevelError = "error" -) - -// Type and function aliases from zap to limit the libraries scope into MM code -type Field = zapcore.Field - -var Int64 = zap.Int64 -var Int32 = zap.Int32 -var Int = zap.Int -var Uint32 = zap.Uint32 -var String = zap.String -var Any = zap.Any -var Err = zap.Error -var NamedErr = zap.NamedError -var Bool = zap.Bool -var Duration = zap.Duration - -type LoggerConfiguration struct { - EnableConsole bool - ConsoleJson bool - ConsoleLevel string - EnableFile bool - FileJson bool - FileLevel string - FileLocation string -} - -type Logger struct { - zap *zap.Logger - consoleLevel zap.AtomicLevel - fileLevel zap.AtomicLevel - logrLogger *logr.Logger -} - -func getZapLevel(level string) zapcore.Level { - switch level { - case LevelInfo: - return zapcore.InfoLevel - case LevelWarn: - return zapcore.WarnLevel - case LevelDebug: - return zapcore.DebugLevel - case LevelError: - return zapcore.ErrorLevel - default: - return zapcore.InfoLevel - } -} - -func makeEncoder(json bool) zapcore.Encoder { - encoderConfig := zap.NewProductionEncoderConfig() - if json { - return zapcore.NewJSONEncoder(encoderConfig) - } - - encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - return zapcore.NewConsoleEncoder(encoderConfig) -} - -func NewLogger(config *LoggerConfiguration) *Logger { - cores := []zapcore.Core{} - logger := &Logger{ - consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)), - fileLevel: zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)), - } - - if config.EnableConsole { - writer := zapcore.Lock(os.Stderr) - core := zapcore.NewCore(makeEncoder(config.ConsoleJson), writer, logger.consoleLevel) - cores = append(cores, core) - } - - if config.EnableFile { - writer := zapcore.AddSync(&lumberjack.Logger{ - Filename: config.FileLocation, - MaxSize: 100, - Compress: true, - }) - core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel) - cores = append(cores, core) - } - - combinedCore := zapcore.NewTee(cores...) - - logger.zap = zap.New(combinedCore, - zap.AddCaller(), - ) - return logger -} - -func (l *Logger) ChangeLevels(config *LoggerConfiguration) { - l.consoleLevel.SetLevel(getZapLevel(config.ConsoleLevel)) - l.fileLevel.SetLevel(getZapLevel(config.FileLevel)) -} - -func (l *Logger) SetConsoleLevel(level string) { - l.consoleLevel.SetLevel(getZapLevel(level)) -} - -func (l *Logger) With(fields ...Field) *Logger { - newlogger := *l - newlogger.zap = newlogger.zap.With(fields...) - if newlogger.logrLogger != nil { - ll := newlogger.logrLogger.WithFields(zapToLogr(fields)) - newlogger.logrLogger = &ll - } - return &newlogger -} - -func (l *Logger) StdLog(fields ...Field) *log.Logger { - return zap.NewStdLog(l.With(fields...).zap.WithOptions(getStdLogOption())) -} - -// StdLogAt returns *log.Logger which writes to supplied zap logger at required level. 
-func (l *Logger) StdLogAt(level string, fields ...Field) (*log.Logger, error) { - return zap.NewStdLogAt(l.With(fields...).zap.WithOptions(getStdLogOption()), getZapLevel(level)) -} - -// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger -// anything written will be interpreted as log entries accordingly -func (l *Logger) StdLogWriter() io.Writer { - newLogger := *l - newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(4), getStdLogOption()) - f := newLogger.Info - return &loggerWriter{f} -} - -func (l *Logger) WithCallerSkip(skip int) *Logger { - newlogger := *l - newlogger.zap = newlogger.zap.WithOptions(zap.AddCallerSkip(skip)) - return &newlogger -} - -// Made for the plugin interface, wraps mlog in a simpler interface -// at the cost of performance -func (l *Logger) Sugar() *SugarLogger { - return &SugarLogger{ - wrappedLogger: l, - zapSugar: l.zap.Sugar(), - } -} - -func (l *Logger) Debug(message string, fields ...Field) { - l.zap.Debug(message, fields...) - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Debug) { - l.logrLogger.WithFields(zapToLogr(fields)).Debug(message) - } -} - -func (l *Logger) Info(message string, fields ...Field) { - l.zap.Info(message, fields...) - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Info) { - l.logrLogger.WithFields(zapToLogr(fields)).Info(message) - } -} - -func (l *Logger) Warn(message string, fields ...Field) { - l.zap.Warn(message, fields...) - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Warn) { - l.logrLogger.WithFields(zapToLogr(fields)).Warn(message) - } -} - -func (l *Logger) Error(message string, fields ...Field) { - l.zap.Error(message, fields...) - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Error) { - l.logrLogger.WithFields(zapToLogr(fields)).Error(message) - } -} - -func (l *Logger) Critical(message string, fields ...Field) { - l.zap.Error(message, fields...) - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Error) { - l.logrLogger.WithFields(zapToLogr(fields)).Error(message) - } -} - -func (l *Logger) Log(level LogLevel, message string, fields ...Field) { - if l.logrLogger != nil && isLevelEnabled(l.logrLogger, logr.Level(level)) { - l.logrLogger.WithFields(zapToLogr(fields)).Log(logr.Level(level), message) - } -} - -func (l *Logger) LogM(levels []LogLevel, message string, fields ...Field) { - if l.logrLogger != nil { - var logger *logr.Logger - for _, lvl := range levels { - if isLevelEnabled(l.logrLogger, logr.Level(lvl)) { - // don't create logger with fields unless at least one level is active. - if logger == nil { - l := l.logrLogger.WithFields(zapToLogr(fields)) - logger = &l - } - logger.Log(logr.Level(lvl), message) - } - } - } -} - -func (l *Logger) Flush(cxt context.Context) error { - if l.logrLogger != nil { - return l.logrLogger.Logr().Flush() // TODO: use context when Logr lib supports it. - } - return nil -} - -// ShutdownAdvancedLogging stops the logger from accepting new log records and tries to -// flush queues within the context timeout. Once complete all targets are shutdown -// and any resources released. -func (l *Logger) ShutdownAdvancedLogging(cxt context.Context) error { - var err error - if l.logrLogger != nil { - err = l.logrLogger.Logr().Shutdown() // TODO: use context when Logr lib supports it. - l.logrLogger = nil - } - return err -} - -// ConfigAdvancedLoggingConfig (re)configures advanced logging based on the -// specified log targets. 
This is the easiest way to get the advanced logger -// configured via a config source such as file. -func (l *Logger) ConfigAdvancedLogging(targets LogTargetCfg) error { - if l.logrLogger != nil { - if err := l.ShutdownAdvancedLogging(context.Background()); err != nil { - Error("error shutting down previous logger", Err(err)) - } - } - - logr, err := newLogr(targets) - l.logrLogger = logr - return err -} - -// AddTarget adds a logr.Target to the advanced logger. This is the preferred method -// to add custom targets or provide configuration that cannot be expressed via a -//config source. -func (l *Logger) AddTarget(target logr.Target) error { - return l.logrLogger.Logr().AddTarget(target) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go deleted file mode 100644 index 32ec9e34..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import ( - "encoding/json" - "fmt" - "io" - "os" - - "github.com/hashicorp/go-multierror" - "github.com/mattermost/logr" - logrFmt "github.com/mattermost/logr/format" - "github.com/mattermost/logr/target" - "go.uber.org/zap/zapcore" -) - -const ( - DefaultMaxTargetQueue = 1000 - DefaultSysLogPort = 514 -) - -type LogLevel struct { - ID logr.LevelID - Name string - Stacktrace bool -} - -type LogTarget struct { - Type string // one of "console", "file", "tcp", "syslog". - Format string // one of "json", "plain" - Levels []LogLevel - Options json.RawMessage - MaxQueueSize int -} - -type LogTargetCfg map[string]*LogTarget -type LogrCleanup func() error - -func newLogr(targets LogTargetCfg) (*logr.Logger, error) { - var errs error - - lgr := logr.Logr{} - - lgr.OnExit = func(int) {} - lgr.OnPanic = func(interface{}) {} - - lgr.OnLoggerError = onLoggerError - lgr.OnQueueFull = onQueueFull - lgr.OnTargetQueueFull = onTargetQueueFull - - for name, t := range targets { - target, err := newLogrTarget(name, t) - if err != nil { - errs = multierror.Append(err) - continue - } - lgr.AddTarget(target) - } - logger := lgr.NewLogger() - return &logger, errs -} - -func newLogrTarget(name string, t *LogTarget) (logr.Target, error) { - formatter, err := newFormatter(name, t.Format) - if err != nil { - return nil, err - } - filter, err := newFilter(name, t.Levels) - if err != nil { - return nil, err - } - - if t.MaxQueueSize == 0 { - t.MaxQueueSize = DefaultMaxTargetQueue - } - - switch t.Type { - case "console": - return newConsoleTarget(name, t, filter, formatter) - case "file": - return newFileTarget(name, t, filter, formatter) - case "syslog": - return newSyslogTarget(name, t, filter, formatter) - case "tcp": - return newTCPTarget(name, t, filter, formatter) - } - return nil, fmt.Errorf("invalid type '%s' for target %s", t.Type, name) -} - -func newFilter(name string, levels []LogLevel) (logr.Filter, error) { - filter := &logr.CustomFilter{} - for _, lvl := range levels { - filter.Add(logr.Level(lvl)) - } - return filter, nil -} - -func newFormatter(name string, format string) (logr.Formatter, error) { - switch format { - case "json", "": - return &logrFmt.JSON{}, nil - case "plain": - return &logrFmt.Plain{Delim: " | "}, nil - default: - return nil, fmt.Errorf("invalid format '%s' for target %s", format, name) - } -} - -func newConsoleTarget(name string, t *LogTarget, filter logr.Filter, formatter 
logr.Formatter) (logr.Target, error) { - type consoleOptions struct { - Out string `json:"Out"` - } - options := &consoleOptions{} - if err := json.Unmarshal(t.Options, options); err != nil { - return nil, err - } - - var w io.Writer - switch options.Out { - case "stdout", "": - w = os.Stdout - case "stderr": - w = os.Stderr - default: - return nil, fmt.Errorf("invalid out '%s' for target %s", options.Out, name) - } - - newTarget := target.NewWriterTarget(filter, formatter, w, t.MaxQueueSize) - return newTarget, nil -} - -func newFileTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { - type fileOptions struct { - Filename string `json:"Filename"` - MaxSize int `json:"MaxSizeMB"` - MaxAge int `json:"MaxAgeDays"` - MaxBackups int `json:"MaxBackups"` - Compress bool `json:"Compress"` - } - options := &fileOptions{} - if err := json.Unmarshal(t.Options, options); err != nil { - return nil, err - } - - if options.Filename == "" { - return nil, fmt.Errorf("missing 'Filename' option for target %s", name) - } - if err := checkFileWritable(options.Filename); err != nil { - return nil, fmt.Errorf("error writing to 'Filename' for target %s: %w", name, err) - } - - newTarget := target.NewFileTarget(filter, formatter, target.FileOptions(*options), t.MaxQueueSize) - return newTarget, nil -} - -func newSyslogTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { - options := &SyslogParams{} - if err := json.Unmarshal(t.Options, options); err != nil { - return nil, err - } - - if options.IP == "" { - return nil, fmt.Errorf("missing 'IP' option for target %s", name) - } - if options.Port == 0 { - options.Port = DefaultSysLogPort - } - return NewSyslogTarget(filter, formatter, options, t.MaxQueueSize) -} - -func newTCPTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { - options := &TcpParams{} - if err := json.Unmarshal(t.Options, options); err != nil { - return nil, err - } - - if options.IP == "" { - return nil, fmt.Errorf("missing 'IP' option for target %s", name) - } - if options.Port == 0 { - return nil, fmt.Errorf("missing 'Port' option for target %s", name) - } - return NewTcpTarget(filter, formatter, options, t.MaxQueueSize) -} - -func checkFileWritable(filename string) error { - // try opening/creating the file for writing - file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return err - } - file.Close() - return nil -} - -func isLevelEnabled(logger *logr.Logger, level logr.Level) bool { - status := logger.Logr().IsLevelEnabled(level) - return status.Enabled -} - -// zapToLogr converts Zap fields to Logr fields. -// This will not be needed once Logr is used for all logging. -func zapToLogr(zapFields []Field) logr.Fields { - encoder := zapcore.NewMapObjectEncoder() - for _, zapField := range zapFields { - zapField.AddTo(encoder) - } - return logr.Fields(encoder.Fields) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go deleted file mode 100644 index fd702abf..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package mlog - -import ( - "bytes" - "strings" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Implementation of zapcore.Core to interpret log messages from a standard logger -// and translate the levels to zapcore levels. -type stdLogLevelInterpreterCore struct { - wrappedCore zapcore.Core -} - -func stdLogInterpretZapEntry(entry zapcore.Entry) zapcore.Entry { - message := entry.Message - if strings.Index(message, "[DEBUG]") == 0 { - entry.Level = zapcore.DebugLevel - entry.Message = message[7:] - } else if strings.Index(message, "[DEBG]") == 0 { - entry.Level = zapcore.DebugLevel - entry.Message = message[6:] - } else if strings.Index(message, "[WARN]") == 0 { - entry.Level = zapcore.WarnLevel - entry.Message = message[6:] - } else if strings.Index(message, "[ERROR]") == 0 { - entry.Level = zapcore.ErrorLevel - entry.Message = message[7:] - } else if strings.Index(message, "[EROR]") == 0 { - entry.Level = zapcore.ErrorLevel - entry.Message = message[6:] - } else if strings.Index(message, "[ERR]") == 0 { - entry.Level = zapcore.ErrorLevel - entry.Message = message[5:] - } else if strings.Index(message, "[INFO]") == 0 { - entry.Level = zapcore.InfoLevel - entry.Message = message[6:] - } - return entry -} - -func (s *stdLogLevelInterpreterCore) Enabled(lvl zapcore.Level) bool { - return s.wrappedCore.Enabled(lvl) -} - -func (s *stdLogLevelInterpreterCore) With(fields []zapcore.Field) zapcore.Core { - return s.wrappedCore.With(fields) -} - -func (s *stdLogLevelInterpreterCore) Check(entry zapcore.Entry, checkedEntry *zapcore.CheckedEntry) *zapcore.CheckedEntry { - entry = stdLogInterpretZapEntry(entry) - return s.wrappedCore.Check(entry, checkedEntry) -} - -func (s *stdLogLevelInterpreterCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { - entry = stdLogInterpretZapEntry(entry) - return s.wrappedCore.Write(entry, fields) -} - -func (s *stdLogLevelInterpreterCore) Sync() error { - return s.wrappedCore.Sync() -} - -func getStdLogOption() zap.Option { - return zap.WrapCore( - func(core zapcore.Core) zapcore.Core { - return &stdLogLevelInterpreterCore{core} - }, - ) -} - -type loggerWriter struct { - logFunc func(msg string, fields ...Field) -} - -func (l *loggerWriter) Write(p []byte) (int, error) { - trimmed := string(bytes.TrimSpace(p)) - for _, line := range strings.Split(trimmed, "\n") { - l.logFunc(line) - } - return len(p), nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go deleted file mode 100644 index c00a8bbf..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import "go.uber.org/zap" - -// Made for the plugin interface, use the regular logger for other uses -type SugarLogger struct { - wrappedLogger *Logger - zapSugar *zap.SugaredLogger -} - -func (l *SugarLogger) Debug(msg string, keyValuePairs ...interface{}) { - l.zapSugar.Debugw(msg, keyValuePairs...) -} - -func (l *SugarLogger) Info(msg string, keyValuePairs ...interface{}) { - l.zapSugar.Infow(msg, keyValuePairs...) -} - -func (l *SugarLogger) Error(msg string, keyValuePairs ...interface{}) { - l.zapSugar.Errorw(msg, keyValuePairs...) -} - -func (l *SugarLogger) Warn(msg string, keyValuePairs ...interface{}) { - l.zapSugar.Warnw(msg, keyValuePairs...) 
-} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go deleted file mode 100644 index 8766a964..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - - "github.com/mattermost/logr" - "github.com/wiggin77/merror" - syslog "github.com/wiggin77/srslog" -) - -// Syslog outputs log records to local or remote syslog. -type Syslog struct { - logr.Basic - w *syslog.Writer -} - -// SyslogParams provides parameters for dialing a syslog daemon. -type SyslogParams struct { - IP string `json:"IP"` - Port int `json:"Port"` - Tag string `json:"Tag"` - TLS bool `json:"TLS"` - Cert string `json:"Cert"` - Insecure bool `json:"Insecure"` -} - -// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS. -func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { - network := "tcp" - var config *tls.Config - - if params.TLS { - network = "tcp+tls" - config = &tls.Config{InsecureSkipVerify: params.Insecure} - if params.Cert != "" { - pool, err := getCertPool(params.Cert) - if err != nil { - return nil, err - } - config.RootCAs = pool - } - } - raddr := fmt.Sprintf("%s:%d", params.IP, params.Port) - - writer, err := syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, params.Tag, config) - if err != nil { - return nil, err - } - - s := &Syslog{w: writer} - s.Basic.Start(s, s, filter, formatter, maxQueue) - - return s, nil -} - -// Shutdown stops processing log records after making best effort to flush queue. -func (s *Syslog) Shutdown(ctx context.Context) error { - errs := merror.New() - - err := s.Basic.Shutdown(ctx) - errs.Append(err) - - err = s.w.Close() - errs.Append(err) - - return errs.ErrorOrNil() -} - -// getCertPool returns a x509.CertPool containing the cert(s) -// from `cert`, which can be a path to a .pem or .crt file, -// or a base64 encoded cert. -func getCertPool(cert string) (*x509.CertPool, error) { - if cert == "" { - return nil, errors.New("no cert provided") - } - - // first treat as a file and try to read. - serverCert, err := ioutil.ReadFile(cert) - if err != nil { - // maybe it's a base64 encoded cert - serverCert, err = base64.StdEncoding.DecodeString(cert) - if err != nil { - return nil, errors.New("cert cannot be read") - } - } - - pool := x509.NewCertPool() - if ok := pool.AppendCertsFromPEM(serverCert); ok { - return pool, nil - } - return nil, errors.New("cannot parse cert") -} - -// Write converts the log record to bytes, via the Formatter, -// and outputs to syslog. -func (s *Syslog) Write(rec *logr.LogRec) error { - _, stacktrace := s.IsLevelEnabled(rec.Level()) - - buf := rec.Logger().Logr().BorrowBuffer() - defer rec.Logger().Logr().ReleaseBuffer(buf) - - buf, err := s.Formatter().Format(rec, stacktrace, buf) - if err != nil { - return err - } - txt := buf.String() - - switch rec.Level() { - case logr.Panic, logr.Fatal: - err = s.w.Crit(txt) - case logr.Error: - err = s.w.Err(txt) - case logr.Warn: - err = s.w.Warning(txt) - case logr.Debug, logr.Trace: - err = s.w.Debug(txt) - default: - // logr.Info plus all custom levels. 
- err = s.w.Info(txt) - } - - if err != nil { - reporter := rec.Logger().Logr().ReportError - reporter(fmt.Errorf("syslog write fail: %w", err)) - // syslog writer will try to reconnect. - } - return err -} - -// String returns a string representation of this target. -func (s *Syslog) String() string { - return "SyslogTarget" -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go deleted file mode 100644 index bf1bcedf..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package mlog - -import ( - "io" - "strings" - "testing" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// testingWriter is an io.Writer that writes through t.Log -type testingWriter struct { - tb testing.TB -} - -func (tw *testingWriter) Write(b []byte) (int, error) { - tw.tb.Log(strings.TrimSpace(string(b))) - return len(b), nil -} - -// NewTestingLogger creates a Logger that proxies logs through a testing interface. -// This allows tests that spin up App instances to avoid spewing logs unless the test fails or -verbose is specified. -func NewTestingLogger(tb testing.TB, writer io.Writer) *Logger { - logWriter := &testingWriter{tb} - multiWriter := io.MultiWriter(logWriter, writer) - logWriterSync := zapcore.AddSync(multiWriter) - - testingLogger := &Logger{ - consoleLevel: zap.NewAtomicLevelAt(getZapLevel("debug")), - fileLevel: zap.NewAtomicLevelAt(getZapLevel("info")), - } - - logWriterCore := zapcore.NewCore(makeEncoder(true), logWriterSync, testingLogger.consoleLevel) - - testingLogger.zap = zap.New(logWriterCore, - zap.AddCaller(), - ) - return testingLogger -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go b/vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go deleted file mode 100644 index 1180ad22..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type AnalyticsRow struct { - Name string `json:"name"` - Value float64 `json:"value"` -} - -type AnalyticsRows []*AnalyticsRow - -func (me *AnalyticsRow) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func AnalyticsRowFromJson(data io.Reader) *AnalyticsRow { - var me *AnalyticsRow - json.NewDecoder(data).Decode(&me) - return me -} - -func (me AnalyticsRows) ToJson() string { - if b, err := json.Marshal(me); err != nil { - return "[]" - } else { - return string(b) - } -} - -func AnalyticsRowsFromJson(data io.Reader) AnalyticsRows { - var me AnalyticsRows - json.NewDecoder(data).Decode(&me) - return me -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go b/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go deleted file mode 100644 index f41d182a..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "regexp" - "strings" -) - -var atMentionRegexp = regexp.MustCompile(`\B@[[:alnum:]][[:alnum:]\.\-_]*`) - -const usernameSpecialChars = ".-_" - -// PossibleAtMentions returns all substrings in message that look like valid @ -// mentions. -func PossibleAtMentions(message string) []string { - var names []string - - if !strings.Contains(message, "@") { - return names - } - - alreadyMentioned := make(map[string]bool) - for _, match := range atMentionRegexp.FindAllString(message, -1) { - name := NormalizeUsername(match[1:]) - if !alreadyMentioned[name] && IsValidUsername(name) { - names = append(names, name) - alreadyMentioned[name] = true - } - } - - return names -} - -// TrimUsernameSpecialChar tries to remove the last character from word if it -// is a special character for usernames (dot, dash or underscore). If not, it -// returns the same string. -func TrimUsernameSpecialChar(word string) (string, bool) { - len := len(word) - - if len > 0 && strings.LastIndexAny(word, usernameSpecialChars) == (len-1) { - return word[:len-1], true - } - - return word, false -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/audits.go b/vendor/github.com/mattermost/mattermost-server/v5/model/audits.go deleted file mode 100644 index a8f01e1b..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/audits.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type Audits []Audit - -func (o Audits) Etag() string { - if len(o) > 0 { - // the first in the list is always the most current - return Etag(o[0].CreateAt) - } else { - return "" - } -} - -func (o Audits) ToJson() string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func AuditsFromJson(data io.Reader) Audits { - var o Audits - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go deleted file mode 100644 index d9cdd2e2..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -var BotDefaultImage = []byte{ - 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, - 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x7e, - 0x08, 0x06, 0x00, 0x00, 0x00, 0xec, 0xa6, 0x19, 0xa2, 0x00, 0x00, 0x00, - 0x04, 0x67, 0x41, 0x4d, 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, - 0x05, 0x00, 0x00, 0x0c, 0xe3, 0x49, 0x44, 0x41, 0x54, 0x78, 0x01, 0xed, - 0x5d, 0x5b, 0x88, 0x15, 0xc9, 0x19, 0xae, 0xf1, 0xba, 0x5e, 0xc6, 0x4b, - 0xbc, 0xa0, 0x46, 0xd7, 0x2b, 0xce, 0x2a, 0xba, 0xca, 0x8a, 0xa3, 0x82, - 0x38, 0xa3, 0x46, 0x47, 0x5d, 0x59, 0xa3, 0x2f, 0x9b, 0x28, 0x6e, 0xc2, - 0xea, 0x83, 0x22, 0x04, 0x82, 0x78, 0x41, 0xf2, 0xb2, 0x04, 0xd4, 0x07, - 0x49, 0x22, 0xc4, 0x07, 0xcd, 0x53, 0x30, 0x26, 0x1a, 0xa2, 0x08, 0xee, - 0xaa, 0xa8, 0x71, 0xb2, 0x41, 0x11, 0xbc, 0xdf, 0xd0, 0x8d, 0x6e, 0x24, - 0x82, 0xe3, 0x65, 0x32, 0x9b, 0x19, 0x1d, 0x47, 0xd7, 0xeb, 0xc9, 0xf7, - 0xb5, 0xa7, 0x0f, 0xe7, 0x9c, 0xe9, 0x3e, 0xdd, 0xe7, 0x4c, 0x57, 0x77, - 0x75, 0x75, 0xfd, 0xf0, 0x9f, 0xee, 0xae, 0xaa, 0xae, 0xfa, 0xff, 0xef, - 0xfb, 0x4f, 0x9d, 0xee, 0xaa, 0xea, 0x3e, 0x65, 0x42, 0x4f, 0xe9, 0x0b, - 0xb7, 0x46, 0x43, 0x2b, 0xd2, 0xdb, 0x91, 0xd8, 0xf6, 0x84, 0x96, 0x67, - 0x69, 0xf7, 0xf4, 0x3e, 0x36, 0xa2, 0x19, 0xfa, 0x34, 0xbd, 0xe5, 0x3e, - 0xf5, 0x31, 0xf4, 0xdf, 0xd0, 0x5b, 0xd0, 0x7f, 0xa5, 0xb7, 0x0d, 0xd8, - 0x6a, 0x25, 0x65, 0x1a, 0x78, 0xd3, 0x1b, 0x3e, 0x54, 0x41, 0x67, 0x42, - 0x2b, 0xa1, 0x24, 0x9d, 0x69, 0x32, 0xa4, 0x11, 0x95, 0x32, 0x18, 0xce, - 0x42, 0x6b, 0xa1, 0x5f, 0x43, 0x99, 0x16, 0x5b, 0x89, 0x63, 0x00, 0x74, - 0x03, 0xda, 0xb3, 0xa0, 0x24, 0xbc, 0x1a, 0x3a, 0x01, 0xda, 0x0e, 0x1a, - 0x85, 0xbc, 0x45, 0xa3, 0x57, 0xa0, 0xff, 0x80, 0x32, 0x20, 0x4e, 0x42, - 0x5b, 0xa0, 0x46, 0x02, 0x46, 0xa0, 0x3d, 0xea, 0xab, 0x81, 0xee, 0x86, - 0xb2, 0xab, 0x4e, 0x29, 0xaa, 0xb4, 0x8d, 0x36, 0xd2, 0x56, 0xda, 0x6c, - 0xa4, 0x8d, 0x08, 0x7c, 0x84, 0xf3, 0x7f, 0x03, 0x7d, 0x00, 0x55, 0x95, - 0x74, 0x37, 0xbb, 0x68, 0x33, 0x6d, 0xa7, 0x0f, 0x46, 0x8a, 0x44, 0xe0, - 0x63, 0x94, 0x3f, 0x0d, 0x75, 0x03, 0x37, 0x6e, 0xe9, 0xf4, 0x85, 0x3e, - 0x19, 0x29, 0x80, 0x00, 0xbb, 0xcc, 0x9f, 0x40, 0xf9, 0x9b, 0x1a, 0x37, - 0x82, 0xfd, 0xda, 0x4b, 0xdf, 0xe8, 0xa3, 0xf9, 0x79, 0x00, 0x08, 0xb6, - 0xf0, 0x02, 0xee, 0x73, 0xe8, 0xb7, 0x50, 0xbf, 0x40, 0xc6, 0xbd, 0x1c, - 0x7d, 0xa5, 0xcf, 0x51, 0x5d, 0xbc, 0xa2, 0x69, 0x35, 0x84, 0xb7, 0x6d, - 0xe7, 0xa0, 0x71, 0x27, 0xb4, 0x54, 0xfb, 0xe9, 0x3b, 0x31, 0x48, 0x9c, - 0x70, 0xa0, 0x66, 0x17, 0x94, 0xb7, 0x51, 0xa5, 0x82, 0xa7, 0xcb, 0x79, - 0xc4, 0x80, 0x58, 0x10, 0x13, 0xed, 0x85, 0xe3, 0x0e, 0xab, 0xa0, 0xdf, - 0x41, 0x75, 0x21, 0x30, 0x28, 0x3f, 0x88, 0x09, 0xb1, 0x89, 0xe3, 0xd8, - 0x0c, 0xcc, 0xf6, 0x16, 0x46, 0xf8, 0x57, 0xd0, 0xa0, 0x00, 0xd3, 0xb5, - 0x1e, 0x62, 0xa4, 0x5d, 0x6f, 0x50, 0x0d, 0xa7, 0xea, 0x0c, 0xf9, 0xbe, - 0x83, 0x9f, 0x58, 0x11, 0xb3, 0xd8, 0x0b, 0x6f, 0x77, 0xbe, 0x80, 0xbe, - 0x81, 0xea, 0xfa, 0x8d, 0x95, 0xe5, 0x17, 0x31, 0x23, 0x76, 0xb1, 0xbd, - 0x65, 0xe4, 0x84, 0x4c, 0xad, 0x21, 0xbe, 0xcd, 0x81, 0x4f, 0x0c, 0x89, - 0x65, 0xac, 0xe4, 0x7d, 0x58, 0x7b, 0x03, 0x2a, 0xeb, 0xdb, 0x91, 0xb4, - 0x7a, 0x89, 0x25, 0x31, 0x8d, 0x85, 0x8c, 0x87, 0x95, 0xf7, 0xa0, 0x49, - 0x23, 0x49, 0xb6, 0xbf, 0xc4, 0x94, 0xd8, 0x2a, 0x2d, 0xd5, 0xb0, 0xae, - 0x09, 0x2a, 0x1b, 0x8c, 0xa4, 0xd6, 0x4f, 0x6c, 0x89, 0xb1, 0x92, 0xf2, - 0x09, 0xac, 0xfa, 0x1e, 0x9a, 0x54, 0x72, 0xc2, 0xf2, 0x9b, 0x18, 0x13, - 0x6b, 0xa5, 0xa4, 0x1a, 0xd6, 0x18, 0xf2, 0xc3, 0x0b, 0x7e, 0x62, 0x4d, - 0xcc, 0x95, 0x90, 0x49, 
0xb0, 0xe2, 0x09, 0x34, 0xac, 0x6f, 0x80, 0x69, - 0xe7, 0x1d, 0xd6, 0xc4, 0x9c, 0xd8, 0x47, 0x2a, 0x5c, 0x78, 0x59, 0x0f, - 0x35, 0xa4, 0x44, 0x83, 0x01, 0xb1, 0x27, 0x07, 0x91, 0xc8, 0x60, 0xb4, - 0x7a, 0x17, 0x6a, 0xc8, 0x8f, 0x16, 0x03, 0x72, 0x40, 0x2e, 0x42, 0x95, - 0x4e, 0x68, 0x2d, 0xc9, 0xd3, 0xb8, 0xaa, 0x05, 0x3d, 0xb9, 0x20, 0x27, - 0x45, 0x4b, 0xa9, 0xc3, 0x8c, 0xdb, 0xd1, 0xd2, 0x8f, 0x8b, 0x6e, 0xcd, - 0x9c, 0x20, 0x0b, 0x81, 0x41, 0xa8, 0xf8, 0x07, 0xd0, 0xc3, 0xb2, 0x1a, - 0xc8, 0xae, 0xf7, 0x53, 0x1c, 0xa8, 0xf6, 0x0d, 0x30, 0xf6, 0xbc, 0xe3, - 0x84, 0xdc, 0x48, 0x15, 0x5e, 0x70, 0x98, 0x2b, 0x7e, 0x75, 0xbf, 0x00, - 0xe4, 0xa6, 0xa8, 0x8b, 0xc2, 0x62, 0xd6, 0xa4, 0xbd, 0x87, 0xca, 0xff, - 0x06, 0x2d, 0x87, 0x1a, 0x51, 0x13, 0x01, 0x72, 0x43, 0x8e, 0xc8, 0x95, - 0x2f, 0x29, 0x26, 0x00, 0x7e, 0x85, 0x1a, 0x95, 0x1f, 0x8b, 0xf6, 0xe3, - 0xf5, 0x80, 0x01, 0x03, 0xc4, 0xee, 0xdd, 0xbb, 0x45, 0x5d, 0x5d, 0x9d, - 0xa5, 0xdc, 0x67, 0x9a, 0x26, 0x42, 0x8e, 0xc8, 0x55, 0xa0, 0xf2, 0x01, - 0x6a, 0x7b, 0x01, 0x8d, 0xfd, 0x6f, 0x6d, 0xbf, 0x7e, 0xfd, 0x52, 0x0f, - 0x1f, 0x3e, 0x4c, 0xe5, 0x0b, 0xd3, 0x98, 0xa7, 0x83, 0x8f, 0x69, 0xae, - 0xc8, 0x59, 0x60, 0x52, 0x8b, 0x9a, 0xb4, 0x00, 0x67, 0xd7, 0xae, 0x5d, - 0xf9, 0xdc, 0x67, 0x8e, 0x99, 0xa7, 0x8b, 0x9f, 0xf0, 0x83, 0x9c, 0x05, - 0x22, 0x9f, 0xa1, 0x16, 0x6d, 0x80, 0xb9, 0x7d, 0xfb, 0x76, 0x86, 0xf0, - 0xfc, 0x1d, 0xe6, 0xe9, 0xe4, 0x2b, 0x7c, 0x21, 0x77, 0x05, 0xa5, 0xac, - 0x60, 0xee, 0xbb, 0x95, 0x28, 0x7c, 0x1c, 0xba, 0x9f, 0x47, 0xb9, 0xd8, - 0x64, 0xbf, 0x78, 0xf1, 0x42, 0x74, 0xea, 0xe4, 0x3c, 0x66, 0xf2, 0xf2, - 0xe5, 0x4b, 0xd1, 0xb9, 0x73, 0xe7, 0xd8, 0xf8, 0xe2, 0xc3, 0xd0, 0xff, - 0xa2, 0x4c, 0x05, 0xd4, 0xf5, 0x11, 0x76, 0xaf, 0x8b, 0xc0, 0x8d, 0x38, - 0x59, 0x1b, 0xf2, 0x7d, 0x00, 0xa6, 0x5b, 0x11, 0x72, 0x47, 0x0e, 0x5d, - 0xa5, 0x50, 0x0f, 0xc0, 0x91, 0x25, 0x8e, 0x33, 0xf3, 0x4d, 0x1a, 0xda, - 0x48, 0xc2, 0x7a, 0x00, 0xf2, 0xf6, 0x14, 0x3a, 0x14, 0xfa, 0x3f, 0x1e, - 0xe4, 0x4b, 0xa1, 0x1e, 0xe0, 0x97, 0x28, 0xac, 0x15, 0xf9, 0xf9, 0xce, - 0x27, 0xe4, 0x98, 0x1c, 0x92, 0x4b, 0x47, 0x71, 0xeb, 0x01, 0x7a, 0xa1, - 0xf4, 0x7f, 0xa0, 0x3d, 0x1d, 0xcf, 0x8a, 0x71, 0x62, 0x02, 0x7b, 0x00, - 0xb2, 0xf5, 0x18, 0x3a, 0x0c, 0xca, 0x25, 0x65, 0x39, 0xe2, 0xd6, 0x03, - 0xfc, 0x02, 0xa5, 0xb4, 0x23, 0x3f, 0xc7, 0xf3, 0x64, 0x1d, 0x90, 0x4b, - 0x72, 0xda, 0x4a, 0x9c, 0x7a, 0x80, 0x2e, 0x28, 0xc5, 0x15, 0xa8, 0xbc, - 0x06, 0xd0, 0x4e, 0x12, 0xda, 0x03, 0x90, 0x47, 0x5e, 0x03, 0x70, 0xdd, - 0xc0, 0x73, 0x1e, 0xd8, 0xe2, 0xd4, 0x03, 0x70, 0x9a, 0x57, 0x4b, 0xf2, - 0x6d, 0xa7, 0x13, 0xba, 0x25, 0xa7, 0xad, 0xa6, 0xf0, 0x3b, 0x38, 0x80, - 0xf1, 0x73, 0x87, 0xb4, 0xc8, 0x93, 0x78, 0x7f, 0x3e, 0x67, 0xce, 0x1c, - 0x31, 0x7d, 0xfa, 0x74, 0x31, 0x68, 0xd0, 0x20, 0xd1, 0xbb, 0x77, 0xef, - 0x92, 0x6c, 0xea, 0xd8, 0xb1, 0xa3, 0xeb, 0x79, 0xcc, 0x3b, 0x74, 0xe8, - 0x90, 0x6b, 0x7e, 0xa1, 0x8c, 0xc6, 0xc6, 0x46, 0x6b, 0x5e, 0xe1, 0xf4, - 0xe9, 0xd3, 0xe2, 0xf8, 0xf1, 0xe3, 0x82, 0x3d, 0x8d, 0x82, 0xf2, 0x33, - 0xd8, 0xf4, 0x97, 0x42, 0x76, 0xfd, 0x10, 0x99, 0x4a, 0x3d, 0xc7, 0xd7, - 0xa5, 0x4b, 0x97, 0xd4, 0xa6, 0x4d, 0x9b, 0x52, 0x4d, 0x4d, 0x4d, 0xf9, - 0x03, 0x77, 0xca, 0x1e, 0xd3, 0x56, 0xda, 0x4c, 0xdb, 0x81, 0xa7, 0x4a, - 0x4a, 0x6e, 0xc9, 0xb1, 0xab, 0x6c, 0x40, 0x8e, 0x32, 0x06, 0x8f, 0x18, - 0x31, 0x22, 0x75, 0xe3, 0xc6, 0x0d, 0x65, 0x89, 0xf6, 0x32, 0x8c, 0xb6, - 0xd3, 0x07, 0x95, 0x30, 0x85, 0x2d, 0xe4, 0xd8, 0x55, 0x94, 0x79, 0x9e, - 0x6f, 0xd4, 0xa8, 0x51, 0xa9, 0xfa, 0xfa, 0x7a, 0x2f, 0x8c, 0x95, 0xcf, - 0xa7, 0x0f, 0xf4, 0x05, 
0x88, 0xab, 0xa2, 0xe4, 0xd8, 0x51, 0x3e, 0x44, - 0xaa, 0x12, 0x46, 0x76, 0xed, 0xda, 0x35, 0x75, 0xed, 0xda, 0x35, 0xe5, - 0xc9, 0xf5, 0x6b, 0x20, 0x7d, 0xa1, 0x4f, 0xaa, 0xe0, 0x0b, 0x3b, 0xc8, - 0xb5, 0x25, 0xd9, 0x77, 0x01, 0x3f, 0xb2, 0x13, 0xa3, 0xde, 0xae, 0x5d, - 0xbb, 0x56, 0x8c, 0x1b, 0x37, 0x2e, 0x6a, 0x33, 0x02, 0x6b, 0x9f, 0xbe, - 0xd0, 0x27, 0x85, 0xc4, 0x91, 0x6b, 0xbe, 0x9a, 0x24, 0xf2, 0x28, 0x2d, - 0x2f, 0x2f, 0x8f, 0xd5, 0x05, 0x9f, 0xdf, 0x5e, 0x80, 0x17, 0x86, 0xf4, - 0x4d, 0x05, 0x8c, 0x61, 0xc3, 0x97, 0x76, 0x30, 0xda, 0x3d, 0x00, 0x6f, - 0x07, 0x67, 0xd8, 0x89, 0x51, 0x6e, 0x6b, 0x6a, 0x6a, 0x44, 0xcf, 0x9e, - 0xfa, 0x0d, 0x42, 0xd2, 0x27, 0xfa, 0xa6, 0x88, 0x54, 0xc1, 0x0e, 0x6b, - 0x08, 0xc0, 0x0e, 0x80, 0x4a, 0x24, 0x28, 0x31, 0xf1, 0x33, 0x77, 0xee, - 0x5c, 0x45, 0x30, 0x0a, 0xde, 0x0c, 0x85, 0x7c, 0x23, 0xd7, 0xe4, 0x3c, - 0xf3, 0xa6, 0xca, 0xd9, 0xc1, 0xbb, 0x5b, 0x5a, 0x8d, 0x43, 0x87, 0x0e, - 0x2d, 0xed, 0xc4, 0x18, 0x9c, 0xa5, 0x98, 0x6f, 0x16, 0xe7, 0x76, 0x0f, - 0x30, 0x5d, 0x15, 0xfc, 0xfa, 0xf7, 0xef, 0xaf, 0x8a, 0x29, 0x81, 0xdb, - 0xa1, 0x98, 0x6f, 0x16, 0xe7, 0x76, 0x00, 0x8c, 0x09, 0xdc, 0xdb, 0x12, - 0x2b, 0xec, 0xd0, 0xc1, 0xfa, 0x69, 0x2a, 0xf1, 0x6c, 0xb5, 0x4f, 0x53, - 0xcc, 0x37, 0x8b, 0x73, 0x06, 0x40, 0x37, 0x28, 0x67, 0x89, 0x8c, 0x24, - 0x0b, 0x01, 0x72, 0xde, 0x8d, 0x01, 0xc0, 0x45, 0x83, 0x4e, 0xd3, 0xc2, - 0xc9, 0x82, 0x23, 0x79, 0xde, 0x92, 0xf3, 0x0a, 0x3b, 0x00, 0x92, 0xe7, - 0xbe, 0xf1, 0x98, 0x08, 0x58, 0x01, 0x10, 0xe8, 0x13, 0x24, 0x51, 0xe0, - 0x7a, 0xe2, 0xc4, 0x09, 0xb1, 0x62, 0xc5, 0x0a, 0x31, 0x71, 0xe2, 0x44, - 0x31, 0x73, 0xe6, 0x4c, 0xb1, 0x7e, 0xfd, 0x7a, 0x71, 0xff, 0xfe, 0xfd, - 0xc0, 0x4c, 0x39, 0x7b, 0xf6, 0xac, 0x58, 0xb9, 0x72, 0xa5, 0x98, 0x30, - 0x61, 0x82, 0x98, 0x3f, 0x7f, 0xbe, 0xd8, 0xb6, 0x6d, 0x9b, 0x78, 0xf5, - 0xea, 0x55, 0x60, 0xf5, 0x47, 0x58, 0x91, 0xc5, 0xfd, 0x9f, 0x61, 0x40, - 0x28, 0x23, 0x54, 0x3d, 0x7a, 0xf4, 0xb0, 0x66, 0xc7, 0x38, 0x43, 0x46, - 0x1d, 0x3c, 0x78, 0x70, 0xab, 0x76, 0x8b, 0x99, 0x03, 0x78, 0xfb, 0xf6, - 0x6d, 0x6a, 0xf5, 0xea, 0xd5, 0xad, 0xea, 0xa0, 0x3f, 0x58, 0x2f, 0x90, - 0x3a, 0x78, 0xf0, 0xa0, 0xdf, 0x81, 0x3a, 0xd7, 0x72, 0x3b, 0x76, 0xec, - 0x48, 0x61, 0x9d, 0x40, 0xab, 0x36, 0x2a, 0x2b, 0x2b, 0x8b, 0x9e, 0xac, - 0xa2, 0x6f, 0xf9, 0x58, 0x13, 0x03, 0x1b, 0x0f, 0x6e, 0x89, 0x51, 0x7e, - 0x19, 0x89, 0xc7, 0xe4, 0x5e, 0x1c, 0x91, 0xd8, 0x80, 0xe5, 0xcc, 0xe4, - 0xc9, 0x93, 0x53, 0x17, 0x2e, 0x5c, 0x48, 0x91, 0xb0, 0x6c, 0x71, 0x02, - 0xa4, 0x98, 0x00, 0xd8, 0xbe, 0x7d, 0x7b, 0x41, 0xb0, 0xba, 0x77, 0xef, - 0x9e, 0xba, 0x73, 0xe7, 0x4e, 0x76, 0x93, 0x45, 0xed, 0x5f, 0xb9, 0x72, - 0x25, 0x85, 0x2b, 0x77, 0xd7, 0x36, 0x96, 0x2d, 0x5b, 0x56, 0x54, 0x7d, - 0x7e, 0xfc, 0x25, 0x46, 0xc4, 0x8a, 0x98, 0xc9, 0xe6, 0x25, 0xcd, 0xbd, - 0x38, 0x25, 0xb3, 0xa1, 0xb1, 0x63, 0xc7, 0xa6, 0xb0, 0x3a, 0xc6, 0x11, - 0x28, 0x3f, 0x80, 0x38, 0x9e, 0x88, 0xc4, 0xe7, 0xcf, 0x9f, 0xfb, 0x9a, - 0x61, 0x5b, 0xba, 0x74, 0xa9, 0x5b, 0x15, 0x9e, 0xe9, 0x4b, 0x96, 0x2c, - 0xf1, 0x24, 0xe1, 0xe6, 0xcd, 0x9b, 0x9e, 0xf5, 0xd8, 0x05, 0x8a, 0xf1, - 0x97, 0x98, 0x11, 0x3b, 0x99, 0xdc, 0xa0, 0xee, 0x53, 0xbc, 0x08, 0xec, - 0x01, 0x95, 0x26, 0x5b, 0xb6, 0x6c, 0x71, 0x7d, 0x14, 0xab, 0x2d, 0x8d, - 0xe2, 0xdb, 0x29, 0x9e, 0x3d, 0x7b, 0xe6, 0x59, 0xc5, 0x99, 0x33, 0x67, - 0x3c, 0xcb, 0xb8, 0x15, 0x38, 0x77, 0x8e, 0xaf, 0xde, 0x29, 0x2c, 0xe7, - 0xcf, 0x9f, 0x2f, 0x5c, 0xa0, 0xc4, 0x5c, 0x3e, 0xbe, 0x46, 0xec, 0x24, - 0x4b, 0x0f, 0x06, 0x80, 0xd4, 0x17, 0x3e, 0x4c, 0x9d, 0x3a, 0x55, 0x8a, - 0x0f, 0x77, 0xef, 0xde, 
0xf5, 0x55, 0xef, 0xbd, 0x7b, 0xf7, 0x04, 0xba, - 0x55, 0x5f, 0x65, 0xb3, 0x0b, 0xf1, 0x9c, 0x07, 0x0f, 0x1e, 0x64, 0x27, - 0x39, 0xee, 0xb3, 0x7e, 0x59, 0x22, 0x0b, 0xbb, 0x2c, 0x7b, 0xcb, 0xa5, - 0x07, 0x80, 0xac, 0xe1, 0xcf, 0xf1, 0xe3, 0xfd, 0xbd, 0xab, 0x02, 0xdd, - 0xa8, 0x68, 0xd7, 0x8e, 0x6e, 0x16, 0x27, 0x3c, 0x67, 0xf4, 0xe8, 0xd1, - 0x9e, 0x27, 0x8d, 0x19, 0x23, 0x6f, 0x10, 0x55, 0x16, 0x76, 0x59, 0x4e, - 0xc9, 0x0f, 0x80, 0xac, 0xc6, 0x02, 0xdd, 0xad, 0xa8, 0xa8, 0x10, 0xc3, - 0x87, 0x0f, 0xf7, 0xac, 0x73, 0xde, 0xbc, 0x79, 0x9e, 0x65, 0xdc, 0x0a, - 0xcc, 0x9a, 0x35, 0xcb, 0x2d, 0xcb, 0x4a, 0xc7, 0xa2, 0x4f, 0x11, 0xc2, - 0xb7, 0xb4, 0xa0, 0x0d, 0x6d, 0xcc, 0xb4, 0x7a, 0x7f, 0xa9, 0x6f, 0xfe, - 0xb0, 0x2f, 0x80, 0x9c, 0xb6, 0xc5, 0x5c, 0x14, 0x39, 0x9d, 0x7f, 0xf2, - 0xe4, 0xc9, 0x54, 0x59, 0x59, 0x99, 0xeb, 0x85, 0x12, 0xd7, 0xe2, 0xb5, - 0xb4, 0xb4, 0x38, 0x9d, 0xea, 0x2b, 0xad, 0xa1, 0xa1, 0x21, 0xd5, 0xb7, - 0x6f, 0x5f, 0xd7, 0xfa, 0x37, 0x6f, 0xde, 0xec, 0xab, 0x1e, 0xbb, 0x50, - 0x29, 0xfe, 0x82, 0x60, 0xd7, 0xf6, 0x03, 0xc8, 0x7b, 0xc1, 0xbe, 0xb1, - 0xb9, 0x8d, 0x51, 0x14, 0xd9, 0xe9, 0x1c, 0xf4, 0xd9, 0xbf, 0x7f, 0xbf, - 0xc0, 0xab, 0x5d, 0x5a, 0xd9, 0x30, 0x63, 0xc6, 0x0c, 0x71, 0xec, 0xd8, - 0x31, 0x81, 0xb5, 0x78, 0xad, 0xf2, 0xfc, 0x26, 0xf4, 0xe9, 0xd3, 0x47, - 0x1c, 0x3d, 0x7a, 0x54, 0x8c, 0x1c, 0x39, 0x32, 0xe7, 0x14, 0x04, 0x9d, - 0xd8, 0xb0, 0x61, 0x83, 0x58, 0xb7, 0x6e, 0x5d, 0x4e, 0x7a, 0x0c, 0x0f, - 0x9a, 0x39, 0xf5, 0xc6, 0x00, 0xe8, 0x13, 0x43, 0xe3, 0x2d, 0x93, 0x17, - 0x2f, 0x5e, 0x2c, 0xaa, 0xaa, 0xaa, 0xac, 0x87, 0x31, 0x2e, 0x5e, 0xbc, - 0x28, 0x7a, 0xf5, 0xea, 0x25, 0x70, 0x0f, 0x2d, 0x66, 0xcf, 0x9e, 0x2d, - 0x48, 0x54, 0x5b, 0x65, 0xd2, 0xa4, 0x49, 0xe2, 0xd2, 0xa5, 0x4b, 0xe2, - 0xc8, 0x91, 0x23, 0x82, 0x57, 0xfc, 0x03, 0x07, 0x0e, 0x14, 0xfc, 0x69, - 0xe0, 0xa8, 0xa0, 0x06, 0x62, 0x7d, 0xf9, 0xaf, 0xc2, 0x11, 0x69, 0xdd, - 0x8c, 0xdd, 0xfd, 0x39, 0x6d, 0x4b, 0xe9, 0x12, 0x9d, 0xea, 0x89, 0x4b, - 0x5a, 0x29, 0xfe, 0xca, 0xe4, 0x06, 0x75, 0x5f, 0xe5, 0x4f, 0xc0, 0x13, - 0xa8, 0x91, 0x64, 0x22, 0xf0, 0x24, 0xd6, 0xd7, 0x00, 0xc9, 0xe4, 0x2c, - 0x50, 0xaf, 0x9b, 0x19, 0x00, 0x8d, 0x81, 0x56, 0x69, 0x2a, 0x8b, 0x13, - 0x02, 0x8d, 0x0c, 0x80, 0xdb, 0x71, 0xb2, 0xd8, 0xd8, 0x1a, 0x28, 0x02, - 0xb7, 0x19, 0x00, 0xdf, 0x04, 0x5a, 0xa5, 0xa9, 0x2c, 0x4e, 0x08, 0x7c, - 0xc3, 0x00, 0xe0, 0x7b, 0x00, 0x8d, 0x24, 0x13, 0x81, 0x4c, 0x00, 0xf0, - 0x36, 0xd0, 0x48, 0xb2, 0x10, 0x20, 0xe7, 0xb7, 0xd8, 0x03, 0xb4, 0x40, - 0xe5, 0x4d, 0x69, 0x25, 0x0b, 0xd4, 0x38, 0x79, 0x4b, 0xce, 0x5b, 0x18, - 0x00, 0x94, 0x9b, 0xef, 0x36, 0xe6, 0x33, 0x41, 0x08, 0x58, 0x9c, 0x73, - 0x28, 0x98, 0x72, 0x0a, 0x3a, 0xd7, 0xda, 0x8b, 0xd9, 0xc7, 0x9b, 0x37, - 0x6f, 0x04, 0x46, 0x02, 0x5d, 0xad, 0xce, 0x7f, 0x18, 0x83, 0xf3, 0xfc, - 0x85, 0xd6, 0x07, 0xb4, 0x6f, 0xdf, 0x3e, 0x67, 0x08, 0xf9, 0xf5, 0xeb, - 0xd7, 0xae, 0x75, 0x73, 0xa8, 0x99, 0xe5, 0x63, 0x2a, 0xe4, 0x3c, 0xf3, - 0x6c, 0xe0, 0xdf, 0x65, 0x39, 0xd1, 0xdc, 0x2c, 0x77, 0xae, 0x69, 0xf9, - 0xf2, 0xe5, 0x82, 0x2f, 0x77, 0x72, 0xd3, 0xeb, 0xd7, 0xaf, 0xe7, 0xb8, - 0xb6, 0x75, 0xeb, 0x56, 0xd7, 0xb2, 0xac, 0x63, 0xdf, 0xbe, 0x7d, 0x99, - 0xf2, 0x3c, 0xd7, 0xad, 0x5e, 0xa6, 0xb3, 0x6d, 0x99, 0x22, 0x19, 0x3b, - 0x8b, 0x73, 0xfb, 0x27, 0xe0, 0x2c, 0x1c, 0x91, 0xc2, 0xd4, 0xe5, 0xcb, - 0x97, 0x65, 0x62, 0xa4, 0x75, 0xdd, 0x12, 0xb1, 0x23, 0xd7, 0xe4, 0x3c, - 0xd3, 0x03, 0xb0, 0x9f, 0xfb, 0x27, 0x13, 0x82, 0x16, 0xcc, 0x99, 0x07, - 0x5d, 0xa5, 0xd4, 0xfa, 0x0a, 0xfd, 0x9c, 0x48, 0x6d, 0xd8, 0xa1, 0x72, - 0x89, 0xd8, 0x91, 0x6b, 
0xeb, 0xb7, 0xcd, 0xee, 0x01, 0xd8, 0xbc, 0x94, - 0x9f, 0x01, 0xce, 0xa7, 0xaf, 0x5a, 0xb5, 0xca, 0xd7, 0x02, 0x4e, 0x07, - 0x0c, 0x42, 0x4f, 0x0a, 0x62, 0x0a, 0xb9, 0xad, 0x46, 0x73, 0xb1, 0x2b, - 0x31, 0x23, 0x76, 0x92, 0x24, 0xc3, 0xb5, 0x7d, 0x11, 0xc8, 0x76, 0x32, - 0x89, 0x41, 0x37, 0xba, 0x73, 0xe7, 0x4e, 0xb1, 0x77, 0xef, 0x5e, 0x6b, - 0x9e, 0x3e, 0x7b, 0x9d, 0xdb, 0xe3, 0xc7, 0x7c, 0x87, 0x71, 0xdb, 0x64, - 0xcd, 0x9a, 0x35, 0x62, 0xe1, 0xc2, 0x85, 0xae, 0x95, 0x0c, 0x19, 0x32, - 0x24, 0x27, 0x6f, 0xd1, 0xa2, 0x45, 0x62, 0xd8, 0xb0, 0x61, 0x39, 0x69, - 0xd9, 0x07, 0xd3, 0xa6, 0x4d, 0xcb, 0x1c, 0xf2, 0xdc, 0x3d, 0x7b, 0xf6, - 0x64, 0x8e, 0xf3, 0x77, 0x82, 0x78, 0xde, 0x7f, 0xe3, 0xc6, 0x8d, 0x39, - 0x6f, 0x44, 0xc1, 0x5b, 0xc5, 0x04, 0x57, 0x23, 0x07, 0x81, 0x4d, 0xbe, - 0xbd, 0x59, 0xc7, 0xae, 0x5c, 0xdf, 0x40, 0x21, 0x69, 0x6b, 0x03, 0xfc, - 0xd4, 0xcd, 0x39, 0x73, 0x5d, 0xc5, 0x69, 0x3d, 0x80, 0x1f, 0x4c, 0x02, - 0x2e, 0x43, 0x8e, 0x33, 0x92, 0xfd, 0x13, 0xc0, 0xc4, 0x3f, 0x66, 0x72, - 0xcc, 0x8e, 0xae, 0x08, 0xe4, 0x70, 0x9c, 0x1f, 0x00, 0x7f, 0x82, 0xd7, - 0xc5, 0x2f, 0xa2, 0xd7, 0x15, 0x2a, 0xfd, 0xfc, 0x22, 0xb7, 0xe4, 0x38, - 0x23, 0xf9, 0x01, 0x50, 0x87, 0x9c, 0xe3, 0x99, 0xdc, 0x08, 0x76, 0x0a, - 0x0d, 0xbc, 0x44, 0x60, 0x4e, 0xa0, 0x4d, 0x2a, 0xf0, 0x44, 0x31, 0xb9, - 0x25, 0xc7, 0x19, 0xc9, 0x0f, 0x00, 0x66, 0xe4, 0x74, 0x11, 0x99, 0x92, - 0x21, 0xed, 0xe0, 0x0f, 0x1c, 0x43, 0x6a, 0x29, 0xfc, 0x66, 0x1e, 0x3d, - 0x7a, 0x14, 0x7e, 0xa3, 0xb9, 0x2d, 0xb6, 0xe2, 0xd6, 0x29, 0x00, 0x0e, - 0xe2, 0x1c, 0xfe, 0xb9, 0x40, 0x24, 0x22, 0x71, 0xf0, 0x23, 0x12, 0x7f, - 0xb2, 0x1b, 0x8d, 0xd8, 0x37, 0x72, 0x4a, 0x6e, 0x73, 0xc4, 0x29, 0x00, - 0x9e, 0xa3, 0xc4, 0xef, 0x72, 0x4a, 0x85, 0x78, 0x70, 0xe0, 0xc0, 0x81, - 0x10, 0x5b, 0x0b, 0xb7, 0xa9, 0x88, 0x7d, 0x23, 0xa7, 0xe4, 0xd6, 0x97, - 0xf4, 0x42, 0xa9, 0x26, 0x68, 0x24, 0xb7, 0x84, 0x18, 0x00, 0xd1, 0xee, - 0x4e, 0x90, 0x3e, 0x45, 0x85, 0x67, 0x9a, 0x4b, 0x72, 0x5a, 0x94, 0xfc, - 0x1a, 0xa5, 0x23, 0x31, 0x1a, 0x03, 0x35, 0x29, 0x3e, 0x96, 0xa5, 0x8b, - 0xd0, 0x17, 0xfa, 0x14, 0x15, 0x9e, 0x68, 0x97, 0x5c, 0x16, 0x2d, 0xfc, - 0x8f, 0x19, 0x4e, 0x1a, 0x44, 0x62, 0xf8, 0x94, 0x29, 0x53, 0x8a, 0x7e, - 0x05, 0x8b, 0x8a, 0x01, 0xc3, 0xff, 0x0b, 0xa0, 0x2f, 0x51, 0xe1, 0x98, - 0xe6, 0x90, 0x5c, 0x96, 0x24, 0x5b, 0x71, 0x56, 0x64, 0xc6, 0x63, 0x28, - 0x36, 0x85, 0x21, 0xe4, 0x56, 0xaf, 0x96, 0x51, 0x91, 0xe8, 0x7c, 0x9b, - 0xb0, 0xe6, 0xc0, 0xb2, 0x9d, 0x3e, 0x44, 0x89, 0x21, 0xda, 0x26, 0x87, - 0xae, 0x52, 0xe6, 0x9a, 0xf3, 0x2e, 0x83, 0xff, 0xcc, 0xc4, 0x45, 0xa3, - 0xad, 0x9f, 0xbe, 0xf4, 0x38, 0x31, 0xc8, 0x6c, 0xbc, 0x48, 0x49, 0x2c, - 0x58, 0xb0, 0x40, 0xe0, 0x25, 0x4a, 0x25, 0xff, 0x59, 0x54, 0x90, 0xf6, - 0x14, 0xaa, 0x8b, 0x7f, 0x1e, 0x85, 0xf7, 0x12, 0x89, 0xc3, 0x87, 0x0f, - 0x0b, 0x99, 0x2f, 0x8f, 0x28, 0x64, 0x43, 0x56, 0x9e, 0xe7, 0x9f, 0x47, - 0x67, 0x95, 0x75, 0xdd, 0xfd, 0x0c, 0x39, 0x51, 0x47, 0xb1, 0x69, 0xbf, - 0x34, 0x0e, 0x96, 0xbb, 0xb2, 0x5a, 0x64, 0x46, 0xad, 0x09, 0x82, 0xd8, - 0x7d, 0x09, 0xc8, 0x99, 0xa7, 0x78, 0xfd, 0x04, 0xd8, 0x15, 0x7c, 0x80, - 0x9d, 0x2b, 0xd0, 0x4e, 0x76, 0x82, 0xd9, 0x2a, 0x8d, 0xc0, 0x4b, 0x58, - 0xc7, 0xe7, 0xd7, 0x3d, 0x1f, 0xfa, 0xf1, 0xbb, 0xa2, 0xb1, 0x01, 0x95, - 0xbd, 0x07, 0x9d, 0x01, 0x35, 0xa2, 0x3e, 0x02, 0xbc, 0xf0, 0xfb, 0x6b, - 0xd0, 0x66, 0x32, 0x00, 0xa4, 0xbe, 0x4b, 0x00, 0xf5, 0x9b, 0xdf, 0xfa, - 0xb6, 0x63, 0x40, 0x8e, 0xc8, 0x95, 0x14, 0xe1, 0x6b, 0xb3, 0x9e, 0x40, - 0x0d, 0x51, 0x6a, 0x62, 0x40, 0x6e, 0xc8, 0x91, 0x54, 0xf9, 0x14, 0xb5, - 0x9b, 0x00, 0x50, 0x13, 
0x03, 0x72, 0x13, 0x8a, 0xfc, 0x1e, 0xad, 0x98, - 0x20, 0x50, 0x0b, 0x03, 0x72, 0x12, 0x9a, 0xf0, 0x6e, 0x80, 0xef, 0x51, - 0x35, 0x41, 0xa0, 0x06, 0x06, 0xe4, 0x22, 0xf4, 0x3b, 0x34, 0xfe, 0xe5, - 0xc8, 0x5d, 0x13, 0x04, 0x91, 0x7f, 0x09, 0xc8, 0x01, 0xb9, 0x88, 0x44, - 0x78, 0xc1, 0x51, 0x0f, 0x35, 0x3d, 0x41, 0x34, 0x18, 0x10, 0x7b, 0xe9, - 0x17, 0x7d, 0x5e, 0x91, 0x35, 0x09, 0x05, 0xcc, 0x9d, 0x41, 0xf8, 0x01, - 0x40, 0xcc, 0x89, 0xbd, 0x12, 0x52, 0x0d, 0x2b, 0xbe, 0x87, 0x9a, 0x9e, - 0x20, 0x1c, 0x0c, 0x88, 0x35, 0x31, 0x57, 0x4a, 0x3e, 0x81, 0x35, 0x26, - 0x08, 0xe4, 0x07, 0x00, 0x31, 0x26, 0xd6, 0x4a, 0x4a, 0x35, 0xac, 0x8a, - 0x6c, 0x29, 0x19, 0xda, 0xd6, 0xbd, 0x07, 0x22, 0xb6, 0xc4, 0x58, 0x69, - 0xe1, 0x8b, 0xfc, 0xef, 0x41, 0x75, 0x27, 0x23, 0x6c, 0xff, 0x88, 0xa9, - 0xbf, 0x3f, 0x49, 0x50, 0x20, 0x3c, 0xde, 0x87, 0x0d, 0x91, 0x3f, 0x67, - 0xa8, 0x51, 0x10, 0x12, 0x4b, 0x62, 0x1a, 0x2b, 0xe1, 0x6a, 0xa2, 0x5a, - 0x68, 0xd8, 0xdf, 0x14, 0xdd, 0xda, 0x23, 0x86, 0xc4, 0x32, 0x96, 0xc2, - 0xe9, 0xe6, 0x2f, 0xa0, 0x6f, 0xa0, 0xba, 0x11, 0x23, 0xdb, 0x1f, 0x62, - 0x46, 0xec, 0xfc, 0x4e, 0xd9, 0xa3, 0xa8, 0xba, 0x52, 0x0d, 0xd3, 0xf8, - 0x4c, 0x9a, 0x6c, 0xd0, 0x74, 0xa9, 0x9f, 0x58, 0x11, 0x33, 0xad, 0xa4, - 0x2f, 0xbc, 0xf9, 0x0a, 0xaa, 0x0b, 0x49, 0xb2, 0xfc, 0x20, 0x46, 0xc4, - 0x4a, 0x4b, 0x29, 0x83, 0x57, 0xab, 0xa0, 0xdf, 0x41, 0x65, 0x01, 0x18, - 0xd7, 0x7a, 0x89, 0x09, 0xb1, 0x21, 0x46, 0xda, 0x0b, 0x23, 0xfc, 0x0f, - 0xd0, 0xb7, 0xd0, 0xb8, 0x12, 0x16, 0x94, 0xdd, 0xc4, 0x80, 0x58, 0x68, - 0xfb, 0xad, 0x87, 0x6f, 0xae, 0x52, 0x89, 0x9c, 0x24, 0x4f, 0x2b, 0xd3, - 0x77, 0x62, 0x90, 0x68, 0xe1, 0x13, 0xca, 0x9f, 0x43, 0xbf, 0x85, 0x06, - 0xf5, 0xad, 0x52, 0xbd, 0x1e, 0xfa, 0x4a, 0x9f, 0xe9, 0xbb, 0x91, 0x34, - 0x02, 0xbc, 0xdd, 0xf9, 0x29, 0x94, 0xcb, 0xcf, 0x55, 0x27, 0xb0, 0x54, - 0xfb, 0xe8, 0x1b, 0x7d, 0xd4, 0xe2, 0xd6, 0x0e, 0x7e, 0x48, 0x93, 0x8f, - 0x51, 0xf3, 0x69, 0x68, 0xa9, 0x40, 0xab, 0x76, 0x1e, 0x7d, 0xa1, 0x4f, - 0x46, 0x8a, 0x44, 0xe0, 0x23, 0x94, 0xff, 0x2d, 0x94, 0xff, 0xe2, 0xac, - 0x1a, 0xa9, 0x5e, 0xf6, 0xd0, 0x66, 0xda, 0x4e, 0x1f, 0x8c, 0xb4, 0x11, - 0x01, 0x76, 0x99, 0x35, 0xd0, 0xdd, 0xd0, 0xa7, 0x50, 0x2f, 0xf0, 0xa3, - 0xca, 0xa7, 0x6d, 0xb4, 0x91, 0xb6, 0xc6, 0xa2, 0x9b, 0x8f, 0xe3, 0x3d, - 0x67, 0x37, 0x80, 0x3b, 0x0b, 0x3a, 0x33, 0xad, 0x1f, 0x62, 0x1b, 0xd5, - 0xc5, 0x14, 0x6f, 0xe1, 0xae, 0x42, 0x6b, 0xd3, 0x7a, 0x12, 0x5b, 0xfe, - 0x01, 0x47, 0x6c, 0x24, 0x8e, 0x01, 0x90, 0x0f, 0x2e, 0x27, 0x4a, 0xaa, - 0xa0, 0x0c, 0x08, 0xde, 0x52, 0x55, 0x40, 0x65, 0x4d, 0x9e, 0xf0, 0x2f, - 0xf6, 0xf8, 0xb8, 0x3c, 0xdf, 0xb4, 0x4d, 0xd2, 0xbf, 0x86, 0x32, 0x2d, - 0xb6, 0xa2, 0x43, 0x00, 0x38, 0x81, 0xcf, 0x41, 0x15, 0x06, 0x02, 0x17, - 0x4c, 0x72, 0x3b, 0x02, 0xca, 0x77, 0xe4, 0x74, 0x87, 0x96, 0xa7, 0xd5, - 0xde, 0xc7, 0xa1, 0xf5, 0x26, 0x14, 0x76, 0xdf, 0x7c, 0x23, 0x0a, 0x95, - 0xfb, 0x5c, 0x7c, 0x71, 0x07, 0x4a, 0xc2, 0x6f, 0xa5, 0xb7, 0x7c, 0x46, - 0x52, 0x2b, 0xf9, 0x3f, 0x92, 0xc9, 0x00, 0xb6, 0x61, 0xee, 0xab, 0xc9, - 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82, -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go deleted file mode 100644 index e38bfffe..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "net/http" - "strings" -) - -const ( - CHANNEL_NOTIFY_DEFAULT = "default" - CHANNEL_NOTIFY_ALL = "all" - CHANNEL_NOTIFY_MENTION = "mention" - CHANNEL_NOTIFY_NONE = "none" - CHANNEL_MARK_UNREAD_ALL = "all" - CHANNEL_MARK_UNREAD_MENTION = "mention" - IGNORE_CHANNEL_MENTIONS_DEFAULT = "default" - IGNORE_CHANNEL_MENTIONS_OFF = "off" - IGNORE_CHANNEL_MENTIONS_ON = "on" - IGNORE_CHANNEL_MENTIONS_NOTIFY_PROP = "ignore_channel_mentions" -) - -type ChannelUnread struct { - TeamId string `json:"team_id"` - ChannelId string `json:"channel_id"` - MsgCount int64 `json:"msg_count"` - MentionCount int64 `json:"mention_count"` - NotifyProps StringMap `json:"-"` -} - -type ChannelUnreadAt struct { - TeamId string `json:"team_id"` - UserId string `json:"user_id"` - ChannelId string `json:"channel_id"` - MsgCount int64 `json:"msg_count"` - MentionCount int64 `json:"mention_count"` - LastViewedAt int64 `json:"last_viewed_at"` - NotifyProps StringMap `json:"-"` -} - -type ChannelMember struct { - ChannelId string `json:"channel_id"` - UserId string `json:"user_id"` - Roles string `json:"roles"` - LastViewedAt int64 `json:"last_viewed_at"` - MsgCount int64 `json:"msg_count"` - MentionCount int64 `json:"mention_count"` - NotifyProps StringMap `json:"notify_props"` - LastUpdateAt int64 `json:"last_update_at"` - SchemeGuest bool `json:"scheme_guest"` - SchemeUser bool `json:"scheme_user"` - SchemeAdmin bool `json:"scheme_admin"` - ExplicitRoles string `json:"explicit_roles"` -} - -type ChannelMembers []ChannelMember - -type ChannelMemberForExport struct { - ChannelMember - ChannelName string - Username string -} - -func (o *ChannelMembers) ToJson() string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func (o *ChannelUnread) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func (o *ChannelUnreadAt) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelMembersFromJson(data io.Reader) *ChannelMembers { - var o *ChannelMembers - json.NewDecoder(data).Decode(&o) - return o -} - -func ChannelUnreadFromJson(data io.Reader) *ChannelUnread { - var o *ChannelUnread - json.NewDecoder(data).Decode(&o) - return o -} - -func ChannelUnreadAtFromJson(data io.Reader) *ChannelUnreadAt { - var o *ChannelUnreadAt - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *ChannelMember) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelMemberFromJson(data io.Reader) *ChannelMember { - var o *ChannelMember - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *ChannelMember) IsValid() *AppError { - - if !IsValidId(o.ChannelId) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) - } - - if !IsValidId(o.UserId) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) - } - - notifyLevel := o.NotifyProps[DESKTOP_NOTIFY_PROP] - if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest) - } - - markUnreadLevel := o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] - if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) { - return NewAppError("ChannelMember.IsValid", 
"model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest) - } - - if pushLevel, ok := o.NotifyProps[PUSH_NOTIFY_PROP]; ok { - if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest) - } - } - - if sendEmail, ok := o.NotifyProps[EMAIL_NOTIFY_PROP]; ok { - if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest) - } - } - - if ignoreChannelMentions, ok := o.NotifyProps[IGNORE_CHANNEL_MENTIONS_NOTIFY_PROP]; ok { - if len(ignoreChannelMentions) > 40 || !IsIgnoreChannelMentionsValid(ignoreChannelMentions) { - return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.ignore_channel_mentions_value.app_error", nil, "ignore_channel_mentions="+ignoreChannelMentions, http.StatusBadRequest) - } - } - - return nil -} - -func (o *ChannelMember) PreSave() { - o.LastUpdateAt = GetMillis() -} - -func (o *ChannelMember) PreUpdate() { - o.LastUpdateAt = GetMillis() -} - -func (o *ChannelMember) GetRoles() []string { - return strings.Fields(o.Roles) -} - -func IsChannelNotifyLevelValid(notifyLevel string) bool { - return notifyLevel == CHANNEL_NOTIFY_DEFAULT || - notifyLevel == CHANNEL_NOTIFY_ALL || - notifyLevel == CHANNEL_NOTIFY_MENTION || - notifyLevel == CHANNEL_NOTIFY_NONE -} - -func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool { - return markUnreadLevel == CHANNEL_MARK_UNREAD_ALL || markUnreadLevel == CHANNEL_MARK_UNREAD_MENTION -} - -func IsSendEmailValid(sendEmail string) bool { - return sendEmail == CHANNEL_NOTIFY_DEFAULT || sendEmail == "true" || sendEmail == "false" -} - -func IsIgnoreChannelMentionsValid(ignoreChannelMentions string) bool { - return ignoreChannelMentions == IGNORE_CHANNEL_MENTIONS_ON || ignoreChannelMentions == IGNORE_CHANNEL_MENTIONS_OFF || ignoreChannelMentions == IGNORE_CHANNEL_MENTIONS_DEFAULT -} - -func GetDefaultChannelNotifyProps() StringMap { - return StringMap{ - DESKTOP_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, - MARK_UNREAD_NOTIFY_PROP: CHANNEL_MARK_UNREAD_ALL, - PUSH_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, - EMAIL_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, - IGNORE_CHANNEL_MENTIONS_NOTIFY_PROP: IGNORE_CHANNEL_MENTIONS_DEFAULT, - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go deleted file mode 100644 index 87fd3aef..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" -) - -const CHANNEL_SEARCH_DEFAULT_LIMIT = 50 - -type ChannelSearch struct { - Term string `json:"term"` - ExcludeDefaultChannels bool `json:"exclude_default_channels"` - NotAssociatedToGroup string `json:"not_associated_to_group"` - TeamIds []string `json:"team_ids"` - GroupConstrained bool `json:"group_constrained"` - ExcludeGroupConstrained bool `json:"exclude_group_constrained"` - Public bool `json:"public"` - Private bool `json:"private"` - IncludeDeleted bool `json:"include_deleted"` - Deleted bool `json:"deleted"` - Page *int `json:"page,omitempty"` - PerPage *int `json:"per_page,omitempty"` -} - -// ToJson converts a ChannelSearch to a json string -func (c *ChannelSearch) ToJson() string { - b, _ := json.Marshal(c) - return string(b) -} - -// ChannelSearchFromJson will decode the input and return a ChannelSearch -func ChannelSearchFromJson(data io.Reader) *ChannelSearch { - var cs *ChannelSearch - json.NewDecoder(data).Decode(&cs) - return cs -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go deleted file mode 100644 index 42fcac3a..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type ChannelView struct { - ChannelId string `json:"channel_id"` - PrevChannelId string `json:"prev_channel_id"` -} - -func (o *ChannelView) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelViewFromJson(data io.Reader) *ChannelView { - var o *ChannelView - json.NewDecoder(data).Decode(&o) - return o -} - -type ChannelViewResponse struct { - Status string `json:"status"` - LastViewedAtTimes map[string]int64 `json:"last_viewed_at_times"` -} - -func (o *ChannelViewResponse) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelViewResponseFromJson(data io.Reader) *ChannelViewResponse { - var o *ChannelViewResponse - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go deleted file mode 100644 index 312096ab..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go +++ /dev/null @@ -1,5335 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information.
- -package model - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -const ( - HEADER_REQUEST_ID = "X-Request-ID" - HEADER_VERSION_ID = "X-Version-ID" - HEADER_CLUSTER_ID = "X-Cluster-ID" - HEADER_ETAG_SERVER = "ETag" - HEADER_ETAG_CLIENT = "If-None-Match" - HEADER_FORWARDED = "X-Forwarded-For" - HEADER_REAL_IP = "X-Real-IP" - HEADER_FORWARDED_PROTO = "X-Forwarded-Proto" - HEADER_TOKEN = "token" - HEADER_CSRF_TOKEN = "X-CSRF-Token" - HEADER_BEARER = "BEARER" - HEADER_AUTH = "Authorization" - HEADER_REQUESTED_WITH = "X-Requested-With" - HEADER_REQUESTED_WITH_XML = "XMLHttpRequest" - STATUS = "status" - STATUS_OK = "OK" - STATUS_FAIL = "FAIL" - STATUS_UNHEALTHY = "UNHEALTHY" - STATUS_REMOVE = "REMOVE" - - CLIENT_DIR = "client" - - API_URL_SUFFIX_V1 = "/api/v1" - API_URL_SUFFIX_V4 = "/api/v4" - API_URL_SUFFIX = API_URL_SUFFIX_V4 -) - -type Response struct { - StatusCode int - Error *AppError - RequestId string - Etag string - ServerVersion string - Header http.Header -} - -type Client4 struct { - Url string // The location of the server, for example "http://localhost:8065" - ApiUrl string // The api location of the server, for example "http://localhost:8065/api/v4" - HttpClient *http.Client // The http client - AuthToken string - AuthType string - HttpHeader map[string]string // Headers to be copied over for each request - - // TrueString is the string value sent to the server for true boolean query parameters. - trueString string - - // FalseString is the string value sent to the server for false boolean query parameters. - falseString string -} - -// SetBoolString is a helper method for overriding how true and false query string parameters are -// sent to the server. -// -// This method is only exposed for testing. It is never necessary to configure these values -// in production. -func (c *Client4) SetBoolString(value bool, valueStr string) { - if value { - c.trueString = valueStr - } else { - c.falseString = valueStr - } -} - -// boolString builds the query string parameter for boolean values. -func (c *Client4) boolString(value bool) string { - if value && c.trueString != "" { - return c.trueString - } else if !value && c.falseString != "" { - return c.falseString - } - - if value { - return "true" - } else { - return "false" - } -} - -func closeBody(r *http.Response) { - if r.Body != nil { - _, _ = io.Copy(ioutil.Discard, r.Body) - _ = r.Body.Close() - } -} - -// Must is a convenience function used for testing. 
-func (c *Client4) Must(result interface{}, resp *Response) interface{} { - if resp.Error != nil { - time.Sleep(time.Second) - panic(resp.Error) - } - - return result -} - -func NewAPIv4Client(url string) *Client4 { - return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}, "", ""} -} - -func BuildErrorResponse(r *http.Response, err *AppError) *Response { - var statusCode int - var header http.Header - if r != nil { - statusCode = r.StatusCode - header = r.Header - } else { - statusCode = 0 - header = make(http.Header) - } - - return &Response{ - StatusCode: statusCode, - Error: err, - Header: header, - } -} - -func BuildResponse(r *http.Response) *Response { - return &Response{ - StatusCode: r.StatusCode, - RequestId: r.Header.Get(HEADER_REQUEST_ID), - Etag: r.Header.Get(HEADER_ETAG_SERVER), - ServerVersion: r.Header.Get(HEADER_VERSION_ID), - Header: r.Header, - } -} - -func (c *Client4) SetToken(token string) { - c.AuthToken = token - c.AuthType = HEADER_BEARER -} - -// MockSession is deprecated in favour of SetToken -func (c *Client4) MockSession(token string) { - c.SetToken(token) -} - -func (c *Client4) SetOAuthToken(token string) { - c.AuthToken = token - c.AuthType = HEADER_TOKEN -} - -func (c *Client4) ClearOAuthToken() { - c.AuthToken = "" - c.AuthType = HEADER_BEARER -} - -func (c *Client4) GetUsersRoute() string { - return "/users" -} - -func (c *Client4) GetUserRoute(userId string) string { - return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId) -} - -func (c *Client4) GetUserCategoryRoute(userID, teamID string) string { - return c.GetUserRoute(userID) + c.GetTeamRoute(teamID) + "/channels/categories" -} - -func (c *Client4) GetUserAccessTokensRoute() string { - return fmt.Sprintf(c.GetUsersRoute() + "/tokens") -} - -func (c *Client4) GetUserAccessTokenRoute(tokenId string) string { - return fmt.Sprintf(c.GetUsersRoute()+"/tokens/%v", tokenId) -} - -func (c *Client4) GetUserByUsernameRoute(userName string) string { - return fmt.Sprintf(c.GetUsersRoute()+"/username/%v", userName) -} - -func (c *Client4) GetUserByEmailRoute(email string) string { - return fmt.Sprintf(c.GetUsersRoute()+"/email/%v", email) -} - -func (c *Client4) GetBotsRoute() string { - return "/bots" -} - -func (c *Client4) GetBotRoute(botUserId string) string { - return fmt.Sprintf("%s/%s", c.GetBotsRoute(), botUserId) -} - -func (c *Client4) GetTeamsRoute() string { - return "/teams" -} - -func (c *Client4) GetTeamRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamsRoute()+"/%v", teamId) -} - -func (c *Client4) GetTeamAutoCompleteCommandsRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamsRoute()+"/%v/commands/autocomplete", teamId) -} - -func (c *Client4) GetTeamByNameRoute(teamName string) string { - return fmt.Sprintf(c.GetTeamsRoute()+"/name/%v", teamName) -} - -func (c *Client4) GetTeamMemberRoute(teamId, userId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId)+"/members/%v", userId) -} - -func (c *Client4) GetTeamMembersRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId) + "/members") -} - -func (c *Client4) GetTeamStatsRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId) + "/stats") -} - -func (c *Client4) GetTeamImportRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId) + "/import") -} - -func (c *Client4) GetChannelsRoute() string { - return "/channels" -} - -func (c *Client4) GetChannelsForTeamRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId) + "/channels") 
-} - -func (c *Client4) GetChannelRoute(channelId string) string { - return fmt.Sprintf(c.GetChannelsRoute()+"/%v", channelId) -} - -func (c *Client4) GetChannelByNameRoute(channelName, teamId string) string { - return fmt.Sprintf(c.GetTeamRoute(teamId)+"/channels/name/%v", channelName) -} - -func (c *Client4) GetChannelsForTeamForUserRoute(teamId, userId string, includeDeleted bool) string { - route := fmt.Sprintf(c.GetUserRoute(userId) + c.GetTeamRoute(teamId) + "/channels") - if includeDeleted { - query := fmt.Sprintf("?include_deleted=%v", includeDeleted) - return route + query - } - return route -} - -func (c *Client4) GetChannelByNameForTeamNameRoute(channelName, teamName string) string { - return fmt.Sprintf(c.GetTeamByNameRoute(teamName)+"/channels/name/%v", channelName) -} - -func (c *Client4) GetChannelMembersRoute(channelId string) string { - return fmt.Sprintf(c.GetChannelRoute(channelId) + "/members") -} - -func (c *Client4) GetChannelMemberRoute(channelId, userId string) string { - return fmt.Sprintf(c.GetChannelMembersRoute(channelId)+"/%v", userId) -} - -func (c *Client4) GetPostsRoute() string { - return "/posts" -} - -func (c *Client4) GetPostsEphemeralRoute() string { - return "/posts/ephemeral" -} - -func (c *Client4) GetConfigRoute() string { - return "/config" -} - -func (c *Client4) GetLicenseRoute() string { - return "/license" -} - -func (c *Client4) GetPostRoute(postId string) string { - return fmt.Sprintf(c.GetPostsRoute()+"/%v", postId) -} - -func (c *Client4) GetFilesRoute() string { - return "/files" -} - -func (c *Client4) GetFileRoute(fileId string) string { - return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId) -} - -func (c *Client4) GetPluginsRoute() string { - return "/plugins" -} - -func (c *Client4) GetPluginRoute(pluginId string) string { - return fmt.Sprintf(c.GetPluginsRoute()+"/%v", pluginId) -} - -func (c *Client4) GetSystemRoute() string { - return "/system" -} - -func (c *Client4) GetTestEmailRoute() string { - return "/email/test" -} - -func (c *Client4) GetTestSiteURLRoute() string { - return "/site_url/test" -} - -func (c *Client4) GetTestS3Route() string { - return "/file/s3_test" -} - -func (c *Client4) GetDatabaseRoute() string { - return "/database" -} - -func (c *Client4) GetCacheRoute() string { - return "/caches" -} - -func (c *Client4) GetClusterRoute() string { - return "/cluster" -} - -func (c *Client4) GetIncomingWebhooksRoute() string { - return "/hooks/incoming" -} - -func (c *Client4) GetIncomingWebhookRoute(hookID string) string { - return fmt.Sprintf(c.GetIncomingWebhooksRoute()+"/%v", hookID) -} - -func (c *Client4) GetComplianceReportsRoute() string { - return "/compliance/reports" -} - -func (c *Client4) GetComplianceReportRoute(reportId string) string { - return fmt.Sprintf("/compliance/reports/%v", reportId) -} - -func (c *Client4) GetOutgoingWebhooksRoute() string { - return "/hooks/outgoing" -} - -func (c *Client4) GetOutgoingWebhookRoute(hookID string) string { - return fmt.Sprintf(c.GetOutgoingWebhooksRoute()+"/%v", hookID) -} - -func (c *Client4) GetPreferencesRoute(userId string) string { - return fmt.Sprintf(c.GetUserRoute(userId) + "/preferences") -} - -func (c *Client4) GetUserStatusRoute(userId string) string { - return fmt.Sprintf(c.GetUserRoute(userId) + "/status") -} - -func (c *Client4) GetUserStatusesRoute() string { - return fmt.Sprintf(c.GetUsersRoute() + "/status") -} - -func (c *Client4) GetSamlRoute() string { - return "/saml" -} - -func (c *Client4) GetLdapRoute() string { - return "/ldap" -} - -func 
(c *Client4) GetBrandRoute() string { - return "/brand" -} - -func (c *Client4) GetDataRetentionRoute() string { - return "/data_retention" -} - -func (c *Client4) GetElasticsearchRoute() string { - return "/elasticsearch" -} - -func (c *Client4) GetBleveRoute() string { - return "/bleve" -} - -func (c *Client4) GetCommandsRoute() string { - return "/commands" -} - -func (c *Client4) GetCommandRoute(commandId string) string { - return fmt.Sprintf(c.GetCommandsRoute()+"/%v", commandId) -} - -func (c *Client4) GetCommandMoveRoute(commandId string) string { - return fmt.Sprintf(c.GetCommandsRoute()+"/%v/move", commandId) -} - -func (c *Client4) GetEmojisRoute() string { - return "/emoji" -} - -func (c *Client4) GetEmojiRoute(emojiId string) string { - return fmt.Sprintf(c.GetEmojisRoute()+"/%v", emojiId) -} - -func (c *Client4) GetEmojiByNameRoute(name string) string { - return fmt.Sprintf(c.GetEmojisRoute()+"/name/%v", name) -} - -func (c *Client4) GetReactionsRoute() string { - return "/reactions" -} - -func (c *Client4) GetOAuthAppsRoute() string { - return "/oauth/apps" -} - -func (c *Client4) GetOAuthAppRoute(appId string) string { - return fmt.Sprintf("/oauth/apps/%v", appId) -} - -func (c *Client4) GetOpenGraphRoute() string { - return "/opengraph" -} - -func (c *Client4) GetJobsRoute() string { - return "/jobs" -} - -func (c *Client4) GetRolesRoute() string { - return "/roles" -} - -func (c *Client4) GetSchemesRoute() string { - return "/schemes" -} - -func (c *Client4) GetSchemeRoute(id string) string { - return c.GetSchemesRoute() + fmt.Sprintf("/%v", id) -} - -func (c *Client4) GetAnalyticsRoute() string { - return "/analytics" -} - -func (c *Client4) GetTimezonesRoute() string { - return fmt.Sprintf(c.GetSystemRoute() + "/timezones") -} - -func (c *Client4) GetChannelSchemeRoute(channelId string) string { - return fmt.Sprintf(c.GetChannelsRoute()+"/%v/scheme", channelId) -} - -func (c *Client4) GetTeamSchemeRoute(teamId string) string { - return fmt.Sprintf(c.GetTeamsRoute()+"/%v/scheme", teamId) -} - -func (c *Client4) GetTotalUsersStatsRoute() string { - return fmt.Sprintf(c.GetUsersRoute() + "/stats") -} - -func (c *Client4) GetRedirectLocationRoute() string { - return "/redirect_location" -} - -func (c *Client4) GetServerBusyRoute() string { - return "/server_busy" -} - -func (c *Client4) GetUserTermsOfServiceRoute(userId string) string { - return c.GetUserRoute(userId) + "/terms_of_service" -} - -func (c *Client4) GetTermsOfServiceRoute() string { - return "/terms_of_service" -} - -func (c *Client4) GetGroupsRoute() string { - return "/groups" -} - -func (c *Client4) GetPublishUserTypingRoute(userId string) string { - return c.GetUserRoute(userId) + "/typing" -} - -func (c *Client4) GetGroupRoute(groupID string) string { - return fmt.Sprintf("%s/%s", c.GetGroupsRoute(), groupID) -} - -func (c *Client4) GetGroupSyncableRoute(groupID, syncableID string, syncableType GroupSyncableType) string { - return fmt.Sprintf("%s/%ss/%s", c.GetGroupRoute(groupID), strings.ToLower(syncableType.String()), syncableID) -} - -func (c *Client4) GetGroupSyncablesRoute(groupID string, syncableType GroupSyncableType) string { - return fmt.Sprintf("%s/%ss", c.GetGroupRoute(groupID), strings.ToLower(syncableType.String())) -} - -func (c *Client4) DoApiGet(url string, etag string) (*http.Response, *AppError) { - return c.DoApiRequest(http.MethodGet, c.ApiUrl+url, "", etag) -} - -func (c *Client4) DoApiPost(url string, data string) (*http.Response, *AppError) { - return c.DoApiRequest(http.MethodPost, 
c.ApiUrl+url, data, "") -} - -func (c *Client4) doApiPostBytes(url string, data []byte) (*http.Response, *AppError) { - return c.doApiRequestBytes(http.MethodPost, c.ApiUrl+url, data, "") -} - -func (c *Client4) DoApiPut(url string, data string) (*http.Response, *AppError) { - return c.DoApiRequest(http.MethodPut, c.ApiUrl+url, data, "") -} - -func (c *Client4) doApiPutBytes(url string, data []byte) (*http.Response, *AppError) { - return c.doApiRequestBytes(http.MethodPut, c.ApiUrl+url, data, "") -} - -func (c *Client4) DoApiDelete(url string) (*http.Response, *AppError) { - return c.DoApiRequest(http.MethodDelete, c.ApiUrl+url, "", "") -} - -func (c *Client4) DoApiRequest(method, url, data, etag string) (*http.Response, *AppError) { - return c.doApiRequestReader(method, url, strings.NewReader(data), etag) -} - -func (c *Client4) doApiRequestBytes(method, url string, data []byte, etag string) (*http.Response, *AppError) { - return c.doApiRequestReader(method, url, bytes.NewReader(data), etag) -} - -func (c *Client4) doApiRequestReader(method, url string, data io.Reader, etag string) (*http.Response, *AppError) { - rq, err := http.NewRequest(method, url, data) - if err != nil { - return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest) - } - - if len(etag) > 0 { - rq.Header.Set(HEADER_ETAG_CLIENT, etag) - } - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - if c.HttpHeader != nil && len(c.HttpHeader) > 0 { - for k, v := range c.HttpHeader { - rq.Header.Set(k, v) - } - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) - } - - if rp.StatusCode == 304 { - return rp, nil - } - - if rp.StatusCode >= 300 { - defer closeBody(rp) - return rp, AppErrorFromJson(rp.Body) - } - - return rp, nil -} - -func (c *Client4) DoUploadFile(url string, data []byte, contentType string) (*FileUploadResponse, *Response) { - return c.doUploadFile(url, bytes.NewReader(data), contentType, 0) -} - -func (c *Client4) doUploadFile(url string, body io.Reader, contentType string, contentLength int64) (*FileUploadResponse, *Response) { - rq, err := http.NewRequest("POST", c.ApiUrl+url, body) - if err != nil { - return nil, &Response{Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - if contentLength != 0 { - rq.ContentLength = contentLength - } - rq.Header.Set("Content-Type", contentType) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return FileUploadResponseFromJson(rp.Body), BuildResponse(rp) -} - -func (c *Client4) DoEmojiUploadFile(url string, data []byte, contentType string) (*Emoji, *Response) { - rq, err := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) - if err != nil { - return nil, &Response{Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", contentType) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - 
return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return EmojiFromJson(rp.Body), BuildResponse(rp) -} - -func (c *Client4) DoUploadImportTeam(url string, data []byte, contentType string) (map[string]string, *Response) { - rq, err := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) - if err != nil { - return nil, &Response{Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", contentType) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return MapFromJson(rp.Body), BuildResponse(rp) -} - -// CheckStatusOK is a convenience function for checking the standard OK response -// from the web service. -func CheckStatusOK(r *http.Response) bool { - m := MapFromJson(r.Body) - defer closeBody(r) - - if m != nil && m[STATUS] == STATUS_OK { - return true - } - - return false -} - -// Authentication Section - -// LoginById authenticates a user by user id and password. -func (c *Client4) LoginById(id string, password string) (*User, *Response) { - m := make(map[string]string) - m["id"] = id - m["password"] = password - return c.login(m) -} - -// Login authenticates a user by login id, which can be username, email or some sort -// of SSO identifier based on server configuration, and a password. -func (c *Client4) Login(loginId string, password string) (*User, *Response) { - m := make(map[string]string) - m["login_id"] = loginId - m["password"] = password - return c.login(m) -} - -// LoginByLdap authenticates a user by LDAP id and password. -func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response) { - m := make(map[string]string) - m["login_id"] = loginId - m["password"] = password - m["ldap_only"] = c.boolString(true) - return c.login(m) -} - -// LoginWithDevice authenticates a user by login id (username, email or some sort -// of SSO identifier based on configuration), password and attaches a device id to -// the session. -func (c *Client4) LoginWithDevice(loginId string, password string, deviceId string) (*User, *Response) { - m := make(map[string]string) - m["login_id"] = loginId - m["password"] = password - m["device_id"] = deviceId - return c.login(m) -} - -// LoginWithMFA logs a user in with an MFA token. -func (c *Client4) LoginWithMFA(loginId, password, mfaToken string) (*User, *Response) { - m := make(map[string]string) - m["login_id"] = loginId - m["password"] = password - m["token"] = mfaToken - return c.login(m) -} - -func (c *Client4) login(m map[string]string) (*User, *Response) { - r, err := c.DoApiPost("/users/login", MapToJson(m)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - c.AuthToken = r.Header.Get(HEADER_TOKEN) - c.AuthType = HEADER_BEARER - return UserFromJson(r.Body), BuildResponse(r) -} - -// Logout terminates the current user's session.
-func (c *Client4) Logout() (bool, *Response) { - r, err := c.DoApiPost("/users/logout", "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - c.AuthToken = "" - c.AuthType = HEADER_BEARER - return CheckStatusOK(r), BuildResponse(r) -} - -// SwitchAccountType changes a user's login type from one type to another. -func (c *Client4) SwitchAccountType(switchRequest *SwitchRequest) (string, *Response) { - r, err := c.DoApiPost(c.GetUsersRoute()+"/login/switch", switchRequest.ToJson()) - if err != nil { - return "", BuildErrorResponse(r, err) - } - defer closeBody(r) - return MapFromJson(r.Body)["follow_link"], BuildResponse(r) -} - -// User Section - -// CreateUser creates a user in the system based on the provided user struct. -func (c *Client4) CreateUser(user *User) (*User, *Response) { - r, err := c.DoApiPost(c.GetUsersRoute(), user.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserFromJson(r.Body), BuildResponse(r) -} - -// CreateUserWithToken creates a user in the system based on the provided tokenId. -func (c *Client4) CreateUserWithToken(user *User, tokenId string) (*User, *Response) { - if tokenId == "" { - err := NewAppError("MissingHashOrData", "api.user.create_user.missing_token.app_error", nil, "", http.StatusBadRequest) - return nil, &Response{StatusCode: err.StatusCode, Error: err} - } - - query := fmt.Sprintf("?t=%v", tokenId) - r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - - return UserFromJson(r.Body), BuildResponse(r) -} - -// CreateUserWithInviteId creates a user in the system based on the provided invite id. -func (c *Client4) CreateUserWithInviteId(user *User, inviteId string) (*User, *Response) { - if inviteId == "" { - err := NewAppError("MissingInviteId", "api.user.create_user.missing_invite_id.app_error", nil, "", http.StatusBadRequest) - return nil, &Response{StatusCode: err.StatusCode, Error: err} - } - - query := fmt.Sprintf("?iid=%v", url.QueryEscape(inviteId)) - r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - - return UserFromJson(r.Body), BuildResponse(r) -} - -// GetMe returns the logged in user. -func (c *Client4) GetMe(etag string) (*User, *Response) { - r, err := c.DoApiGet(c.GetUserRoute(ME), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserFromJson(r.Body), BuildResponse(r) -} - -// GetUser returns a user based on the provided user id string. -func (c *Client4) GetUser(userId, etag string) (*User, *Response) { - r, err := c.DoApiGet(c.GetUserRoute(userId), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserFromJson(r.Body), BuildResponse(r) -} - -// GetUserByUsername returns a user based on the provided user name string. -func (c *Client4) GetUserByUsername(userName, etag string) (*User, *Response) { - r, err := c.DoApiGet(c.GetUserByUsernameRoute(userName), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserFromJson(r.Body), BuildResponse(r) -} - -// GetUserByEmail returns a user based on the provided user email string.
-func (c *Client4) GetUserByEmail(email, etag string) (*User, *Response) { - r, err := c.DoApiGet(c.GetUserByEmailRoute(email), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserFromJson(r.Body), BuildResponse(r) -} - -// AutocompleteUsersInTeam returns the users on a team based on search term. -func (c *Client4) AutocompleteUsersInTeam(teamId string, username string, limit int, etag string) (*UserAutocomplete, *Response) { - query := fmt.Sprintf("?in_team=%v&name=%v&limit=%d", teamId, username, limit) - r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAutocompleteFromJson(r.Body), BuildResponse(r) -} - -// AutocompleteUsersInChannel returns the users in a channel based on search term. -func (c *Client4) AutocompleteUsersInChannel(teamId string, channelId string, username string, limit int, etag string) (*UserAutocomplete, *Response) { - query := fmt.Sprintf("?in_team=%v&in_channel=%v&name=%v&limit=%d", teamId, channelId, username, limit) - r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAutocompleteFromJson(r.Body), BuildResponse(r) -} - -// AutocompleteUsers returns the users in the system based on search term. -func (c *Client4) AutocompleteUsers(username string, limit int, etag string) (*UserAutocomplete, *Response) { - query := fmt.Sprintf("?name=%v&limit=%d", username, limit) - r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAutocompleteFromJson(r.Body), BuildResponse(r) -} - -// GetDefaultProfileImage gets the default user's profile image. Must be logged in. -func (c *Client4) GetDefaultProfileImage(userId string) ([]byte, *Response) { - r, appErr := c.DoApiGet(c.GetUserRoute(userId)+"/image/default", "") - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - - data, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, BuildErrorResponse(r, NewAppError("GetDefaultProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) - } - - return data, BuildResponse(r) -} - -// GetProfileImage gets user's profile image. Must be logged in. -func (c *Client4) GetProfileImage(userId, etag string) ([]byte, *Response) { - r, appErr := c.DoApiGet(c.GetUserRoute(userId)+"/image", etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - - data, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, BuildErrorResponse(r, NewAppError("GetProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) - } - return data, BuildResponse(r) -} - -// GetUsers returns a page of users on the system. Page counting starts at 0. -func (c *Client4) GetUsers(page int, perPage int, etag string) ([]*User, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) - r, err := c.DoApiGet(c.GetUsersRoute()+query, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserListFromJson(r.Body), BuildResponse(r) -} - -// GetUsersInTeam returns a page of users on a team. Page counting starts at 0. 
-func (c *Client4) GetUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetNewUsersInTeam returns a page of users on a team, sorted by newest creation time. Page counting starts at 0.
-func (c *Client4) GetNewUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?sort=create_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetRecentlyActiveUsersInTeam returns a page of users on a team, sorted by most recent activity. Page counting starts at 0.
-func (c *Client4) GetRecentlyActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?sort=last_activity_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetActiveUsersInTeam returns a page of active users on a team. Page counting starts at 0.
-func (c *Client4) GetActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?active=true&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersNotInTeam returns a page of users who are not in a team. Page counting starts at 0.
-func (c *Client4) GetUsersNotInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?not_in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersInChannel returns a page of users in a channel. Page counting starts at 0.
-func (c *Client4) GetUsersInChannel(channelId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v", channelId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersInChannelByStatus returns a page of users in a channel. Page counting starts at 0. Results are sorted by status.
-func (c *Client4) GetUsersInChannelByStatus(channelId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v&sort=status", channelId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersNotInChannel returns a page of users not in a channel. Page counting starts at 0.
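// Illustrative sketch: draining every page of GetUsersInTeam above; page counting
// starts at 0 and the loop stops on an error or an empty page. teamId is a placeholder.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleAllTeamUsers(client *model.Client4, teamId string) []*model.User {
	var all []*model.User
	for page := 0; ; page++ {
		users, resp := client.GetUsersInTeam(teamId, page, 200, "")
		if resp.Error != nil || len(users) == 0 {
			break
		}
		all = append(all, users...)
	}
	return all
}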
-func (c *Client4) GetUsersNotInChannel(teamId, channelId string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?in_team=%v&not_in_channel=%v&page=%v&per_page=%v", teamId, channelId, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersWithoutTeam returns a page of users on the system that aren't on any teams. Page counting starts at 0.
-func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?without_team=1&page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersInGroup returns a page of users in a group. Page counting starts at 0.
-func (c *Client4) GetUsersInGroup(groupID string, page int, perPage int, etag string) ([]*User, *Response) {
-	query := fmt.Sprintf("?in_group=%v&page=%v&per_page=%v", groupID, page, perPage)
-	r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersByIds returns a list of users based on the provided user ids.
-func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) {
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersByIdsWithOptions returns a list of users based on the provided user ids, honoring the provided options.
-func (c *Client4) GetUsersByIdsWithOptions(userIds []string, options *UserGetByIdsOptions) ([]*User, *Response) {
-	v := url.Values{}
-	if options.Since != 0 {
-		v.Set("since", fmt.Sprintf("%d", options.Since))
-	}
-
-	url := c.GetUsersRoute() + "/ids"
-	if len(v) > 0 {
-		url += "?" + v.Encode()
-	}
-
-	r, err := c.DoApiPost(url, ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersByUsernames returns a list of users based on the provided usernames.
-func (c *Client4) GetUsersByUsernames(usernames []string) ([]*User, *Response) {
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/usernames", ArrayToJson(usernames))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersByGroupChannelIds returns a map with channel ids as keys
-// and a list of users as values based on the provided user ids.
-func (c *Client4) GetUsersByGroupChannelIds(groupChannelIds []string) (map[string][]*User, *Response) {
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/group_channels", ArrayToJson(groupChannelIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	usersByChannelId := map[string][]*User{}
-	json.NewDecoder(r.Body).Decode(&usersByChannelId)
-	return usersByChannelId, BuildResponse(r)
-}
-
-// SearchUsers returns a list of users based on some search criteria.
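// Illustrative sketch: using GetUsersByIdsWithOptions above to fetch only users
// updated since a given millisecond timestamp; the ids and timestamp are placeholders.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleChangedUsers(client *model.Client4, ids []string, sinceMillis int64) ([]*model.User, error) {
	users, resp := client.GetUsersByIdsWithOptions(ids, &model.UserGetByIdsOptions{Since: sinceMillis})
	if resp.Error != nil {
		return nil, resp.Error
	}
	return users, nil
}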
-func (c *Client4) SearchUsers(search *UserSearch) ([]*User, *Response) {
-	r, err := c.doApiPostBytes(c.GetUsersRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserListFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateUser updates a user in the system based on the provided user struct.
-func (c *Client4) UpdateUser(user *User) (*User, *Response) {
-	r, err := c.DoApiPut(c.GetUserRoute(user.Id), user.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserFromJson(r.Body), BuildResponse(r)
-}
-
-// PatchUser partially updates a user in the system. Any missing fields are not updated.
-func (c *Client4) PatchUser(userId string, patch *UserPatch) (*User, *Response) {
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/patch", patch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateUserAuth updates a user's AuthData (authData, authService and password) in the system.
-func (c *Client4) UpdateUserAuth(userId string, userAuth *UserAuth) (*UserAuth, *Response) {
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/auth", userAuth.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserAuthFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateUserMfa activates multi-factor authentication for a user if activate
-// is true and a valid code is provided. If activate is false, then code is not
-// required and multi-factor authentication is disabled for the user.
-func (c *Client4) UpdateUserMfa(userId, code string, activate bool) (bool, *Response) {
-	requestBody := make(map[string]interface{})
-	requestBody["activate"] = activate
-	requestBody["code"] = code
-
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/mfa", StringInterfaceToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// CheckUserMfa checks whether a user has MFA active on their account or not based on the
-// provided login id.
-// Deprecated: Clients should use Login method and check for MFA Error
-func (c *Client4) CheckUserMfa(loginId string) (bool, *Response) {
-	requestBody := make(map[string]interface{})
-	requestBody["login_id"] = loginId
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/mfa", StringInterfaceToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	data := StringInterfaceFromJson(r.Body)
-	mfaRequired, ok := data["mfa_required"].(bool)
-	if !ok {
-		return false, BuildResponse(r)
-	}
-	return mfaRequired, BuildResponse(r)
-}
-
-// GenerateMfaSecret will generate a new MFA secret for a user and return it as a string and
-// as a base64 encoded image QR code.
-func (c *Client4) GenerateMfaSecret(userId string) (*MfaSecret, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/mfa/generate", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MfaSecretFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateUserPassword updates a user's password. Must be logged in as the user or be a system administrator.
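// Illustrative sketch: a partial update via PatchUser above; only fields set on the
// UserPatch change, and model.NewString is the usual helper for *string fields.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleSetNickname(client *model.Client4, userId string) error {
	patch := &model.UserPatch{Nickname: model.NewString("On call this week")}
	if _, resp := client.PatchUser(userId, patch); resp.Error != nil {
		return resp.Error
	}
	return nil
}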
-func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string) (bool, *Response) {
-	requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword}
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// PromoteGuestToUser converts a guest into a regular user.
-func (c *Client4) PromoteGuestToUser(guestId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/promote", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DemoteUserToGuest converts a regular user into a guest.
-func (c *Client4) DemoteUserToGuest(guestId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/demote", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateUserRoles updates a user's roles in the system. A user can have "system_user" and "system_admin" roles.
-func (c *Client4) UpdateUserRoles(userId, roles string) (bool, *Response) {
-	requestBody := map[string]string{"roles": roles}
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/roles", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateUserActive updates the active status of a user.
-func (c *Client4) UpdateUserActive(userId string, active bool) (bool, *Response) {
-	requestBody := make(map[string]interface{})
-	requestBody["active"] = active
-	r, err := c.DoApiPut(c.GetUserRoute(userId)+"/active", StringInterfaceToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DeleteUser deactivates a user in the system based on the provided user id string.
-func (c *Client4) DeleteUser(userId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetUserRoute(userId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// ConvertUserToBot converts a user to a bot user.
-func (c *Client4) ConvertUserToBot(userId string) (*Bot, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/convert_to_bot", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// ConvertBotToUser converts a bot user to a user.
-func (c *Client4) ConvertBotToUser(userId string, userPatch *UserPatch, setSystemAdmin bool) (*User, *Response) {
-	var query string
-	if setSystemAdmin {
-		query = "?set_system_admin=true"
-	}
-	r, err := c.DoApiPost(c.GetBotRoute(userId)+"/convert_to_user"+query, userPatch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserFromJson(r.Body), BuildResponse(r)
-}
-
-// PermanentDeleteAllUsers permanently deletes all users in the system. This is a local-only endpoint.
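// Illustrative sketch: the admin-side calls above combined; grants system admin using
// the space-separated role string format the UpdateUserRoles comment describes, then
// deactivates the account. userId is a placeholder.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleAdminOps(client *model.Client4, userId string) error {
	if _, resp := client.UpdateUserRoles(userId, "system_user system_admin"); resp.Error != nil {
		return resp.Error
	}
	if _, resp := client.UpdateUserActive(userId, false); resp.Error != nil {
		return resp.Error
	}
	return nil
}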
-func (c *Client4) PermanentDeleteAllUsers() (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetUsersRoute())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// SendPasswordResetEmail will send a link for password resetting to a user with the
-// provided email.
-func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) {
-	requestBody := map[string]string{"email": email}
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset/send", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// ResetPassword uses a recovery code to reset a user's password.
-func (c *Client4) ResetPassword(token, newPassword string) (bool, *Response) {
-	requestBody := map[string]string{"token": token, "new_password": newPassword}
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetSessions returns a list of sessions based on the provided user id string.
-func (c *Client4) GetSessions(userId, etag string) ([]*Session, *Response) {
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/sessions", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return SessionsFromJson(r.Body), BuildResponse(r)
-}
-
-// RevokeSession revokes a user session based on the provided user id and session id strings.
-func (c *Client4) RevokeSession(userId, sessionId string) (bool, *Response) {
-	requestBody := map[string]string{"session_id": sessionId}
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// RevokeAllSessions revokes all sessions for the provided user id string.
-func (c *Client4) RevokeAllSessions(userId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke/all", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// RevokeSessionsFromAllUsers revokes all sessions for all users.
-func (c *Client4) RevokeSessionsFromAllUsers() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/sessions/revoke/all", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// AttachDeviceId attaches a mobile device ID to the current session.
-func (c *Client4) AttachDeviceId(deviceId string) (bool, *Response) {
-	requestBody := map[string]string{"device_id": deviceId}
-	r, err := c.DoApiPut(c.GetUsersRoute()+"/sessions/device", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetTeamsUnreadForUser will return an array with TeamUnread objects that contain the amount
-// of unread messages and mentions the current user has for the teams it belongs to.
-// An optional team ID can be set to exclude that team from the results. Must be authenticated.
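// Illustrative sketch: listing a user's sessions with GetSessions and revoking each
// one individually, as an alternative to the RevokeAllSessions call above.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleRevokeEachSession(client *model.Client4, userId string) error {
	sessions, resp := client.GetSessions(userId, "")
	if resp.Error != nil {
		return resp.Error
	}
	for _, session := range sessions {
		if _, resp := client.RevokeSession(userId, session.Id); resp.Error != nil {
			return resp.Error
		}
	}
	return nil
}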
-func (c *Client4) GetTeamsUnreadForUser(userId, teamIdToExclude string) ([]*TeamUnread, *Response) {
-	var optional string
-	if teamIdToExclude != "" {
-		optional += fmt.Sprintf("?exclude_team=%s", url.QueryEscape(teamIdToExclude))
-	}
-
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/unread"+optional, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamsUnreadFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUserAudits returns a list of audits based on the provided user id string.
-func (c *Client4) GetUserAudits(userId string, page int, perPage int, etag string) (Audits, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/audits"+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return AuditsFromJson(r.Body), BuildResponse(r)
-}
-
-// VerifyUserEmail will verify a user's email using the supplied token.
-func (c *Client4) VerifyUserEmail(token string) (bool, *Response) {
-	requestBody := map[string]string{"token": token}
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// VerifyUserEmailWithoutToken will verify a user's email by its Id. (Requires manage system role)
-func (c *Client4) VerifyUserEmailWithoutToken(userId string) (*User, *Response) {
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/email/verify/member", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserFromJson(r.Body), BuildResponse(r)
-}
-
-// SendVerificationEmail will send an email to the user with the provided email address, if
-// that user exists. The email will contain a link that can be used to verify the user's
-// email address.
-func (c *Client4) SendVerificationEmail(email string) (bool, *Response) {
-	requestBody := map[string]string{"email": email}
-	r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify/send", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// SetDefaultProfileImage resets the profile image to a default generated one.
-func (c *Client4) SetDefaultProfileImage(userId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetUserRoute(userId) + "/image")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// SetProfileImage sets the profile image of the user.
-func (c *Client4) SetProfileImage(userId string, data []byte) (bool, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormFile("image", "profile.png")
-	if err != nil {
-		return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
-		return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if err = writer.Close(); err != nil {
-		return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	rq, err := http.NewRequest("POST", c.ApiUrl+c.GetUserRoute(userId)+"/image", bytes.NewReader(body.Bytes()))
-	if err != nil {
-		return false, &Response{Error: NewAppError("SetProfileImage", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-	rq.Header.Set("Content-Type", writer.FormDataContentType())
-
-	if len(c.AuthToken) > 0 {
-		rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
-	}
-
-	rp, err := c.HttpClient.Do(rq)
-	if err != nil || rp == nil {
-		return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetUserRoute(userId)+"/image", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
-	}
-	defer closeBody(rp)
-
-	if rp.StatusCode >= 300 {
-		return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
-	}
-
-	return CheckStatusOK(rp), BuildResponse(rp)
-}
-
-// CreateUserAccessToken will generate a user access token that can be used in place
-// of a session token to access the REST API. Must have the 'create_user_access_token'
-// permission and if generating for another user, must have the 'edit_other_users'
-// permission. A non-blank description is required.
-func (c *Client4) CreateUserAccessToken(userId, description string) (*UserAccessToken, *Response) {
-	requestBody := map[string]string{"description": description}
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+"/tokens", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserAccessTokenFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUserAccessTokens will get a page of access tokens' id, description, is_active
-// and the user_id in the system. The actual token will not be returned. Must have
-// the 'manage_system' permission.
-func (c *Client4) GetUserAccessTokens(page int, perPage int) ([]*UserAccessToken, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetUserAccessTokensRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UserAccessTokenListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUserAccessToken will get a user access token's id, description, is_active
-// and the user_id of the user it is for. The actual token will not be returned.
-// Must have the 'read_user_access_token' permission and if getting for another
-// user, must have the 'edit_other_users' permission.
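// Illustrative sketch: minting a personal access token with CreateUserAccessToken and
// pointing the client at it through the exported AuthToken/AuthType fields, the same
// fields Logout above resets. The description string is a placeholder.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleUseAccessToken(client *model.Client4, userId string) error {
	token, resp := client.CreateUserAccessToken(userId, "automation token")
	if resp.Error != nil {
		return resp.Error
	}
	client.AuthToken = token.Token
	client.AuthType = model.HEADER_BEARER
	return nil
}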
-func (c *Client4) GetUserAccessToken(tokenId string) (*UserAccessToken, *Response) { - r, err := c.DoApiGet(c.GetUserAccessTokenRoute(tokenId), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAccessTokenFromJson(r.Body), BuildResponse(r) -} - -// GetUserAccessTokensForUser will get a paged list of user access tokens showing id, -// description and user_id for each. The actual tokens will not be returned. Must have -// the 'read_user_access_token' permission and if getting for another user, must have the -// 'edit_other_users' permission. -func (c *Client4) GetUserAccessTokensForUser(userId string, page, perPage int) ([]*UserAccessToken, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) - r, err := c.DoApiGet(c.GetUserRoute(userId)+"/tokens"+query, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAccessTokenListFromJson(r.Body), BuildResponse(r) -} - -// RevokeUserAccessToken will revoke a user access token by id. Must have the -// 'revoke_user_access_token' permission and if revoking for another user, must have the -// 'edit_other_users' permission. -func (c *Client4) RevokeUserAccessToken(tokenId string) (bool, *Response) { - requestBody := map[string]string{"token_id": tokenId} - r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/revoke", MapToJson(requestBody)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// SearchUserAccessTokens returns user access tokens matching the provided search term. -func (c *Client4) SearchUserAccessTokens(search *UserAccessTokenSearch) ([]*UserAccessToken, *Response) { - r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/search", search.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserAccessTokenListFromJson(r.Body), BuildResponse(r) -} - -// DisableUserAccessToken will disable a user access token by id. Must have the -// 'revoke_user_access_token' permission and if disabling for another user, must have the -// 'edit_other_users' permission. -func (c *Client4) DisableUserAccessToken(tokenId string) (bool, *Response) { - requestBody := map[string]string{"token_id": tokenId} - r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/disable", MapToJson(requestBody)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// EnableUserAccessToken will enable a user access token by id. Must have the -// 'create_user_access_token' permission and if enabling for another user, must have the -// 'edit_other_users' permission. -func (c *Client4) EnableUserAccessToken(tokenId string) (bool, *Response) { - requestBody := map[string]string{"token_id": tokenId} - r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/enable", MapToJson(requestBody)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// Bots section - -// CreateBot creates a bot in the system based on the provided bot struct. -func (c *Client4) CreateBot(bot *Bot) (*Bot, *Response) { - r, err := c.doApiPostBytes(c.GetBotsRoute(), bot.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return BotFromJson(r.Body), BuildResponse(r) -} - -// PatchBot partially updates a bot. Any missing fields are not updated. 
-func (c *Client4) PatchBot(userId string, patch *BotPatch) (*Bot, *Response) {
-	r, err := c.doApiPutBytes(c.GetBotRoute(userId), patch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// GetBot fetches the given, undeleted bot.
-func (c *Client4) GetBot(userId string, etag string) (*Bot, *Response) {
-	r, err := c.DoApiGet(c.GetBotRoute(userId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// GetBotIncludeDeleted fetches the given bot, even if it is deleted.
-func (c *Client4) GetBotIncludeDeleted(userId string, etag string) (*Bot, *Response) {
-	r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted="+c.boolString(true), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// GetBots fetches the given page of bots, excluding deleted.
-func (c *Client4) GetBots(page, perPage int, etag string) ([]*Bot, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetBotsRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetBotsIncludeDeleted fetches the given page of bots, including deleted.
-func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted="+c.boolString(true), page, perPage)
-	r, err := c.DoApiGet(c.GetBotsRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetBotsOrphaned fetches the given page of bots, only including orphaned bots.
-func (c *Client4) GetBotsOrphaned(page, perPage int, etag string) ([]*Bot, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned="+c.boolString(true), page, perPage)
-	r, err := c.DoApiGet(c.GetBotsRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotListFromJson(r.Body), BuildResponse(r)
-}
-
-// DisableBot disables the given bot in the system.
-func (c *Client4) DisableBot(botUserId string) (*Bot, *Response) {
-	r, err := c.doApiPostBytes(c.GetBotRoute(botUserId)+"/disable", nil)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// EnableBot enables the given bot in the system.
-func (c *Client4) EnableBot(botUserId string) (*Bot, *Response) {
-	r, err := c.doApiPostBytes(c.GetBotRoute(botUserId)+"/enable", nil)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// AssignBot assigns the given bot to the given user.
-func (c *Client4) AssignBot(botUserId, newOwnerId string) (*Bot, *Response) {
-	r, err := c.doApiPostBytes(c.GetBotRoute(botUserId)+"/assign/"+newOwnerId, nil)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BotFromJson(r.Body), BuildResponse(r)
-}
-
-// SetBotIconImage sets LHS bot icon image.
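// Illustrative sketch: collecting every non-deleted bot a page at a time with GetBots
// above; the loop stops on an error or an empty page.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleAllBots(client *model.Client4) []*model.Bot {
	var bots []*model.Bot
	for page := 0; ; page++ {
		batch, resp := client.GetBots(page, 60, "")
		if resp.Error != nil || len(batch) == 0 {
			break
		}
		bots = append(bots, batch...)
	}
	return bots
}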
-func (c *Client4) SetBotIconImage(botUserId string, data []byte) (bool, *Response) { - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - - part, err := writer.CreateFormFile("image", "icon.svg") - if err != nil { - return false, &Response{Error: NewAppError("SetBotIconImage", "model.client.set_bot_icon_image.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { - return false, &Response{Error: NewAppError("SetBotIconImage", "model.client.set_bot_icon_image.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - if err = writer.Close(); err != nil { - return false, &Response{Error: NewAppError("SetBotIconImage", "model.client.set_bot_icon_image.writer.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - rq, err := http.NewRequest("POST", c.ApiUrl+c.GetBotRoute(botUserId)+"/icon", bytes.NewReader(body.Bytes())) - if err != nil { - return false, &Response{Error: NewAppError("SetBotIconImage", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", writer.FormDataContentType()) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetBotRoute(botUserId)+"/icon", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return CheckStatusOK(rp), BuildResponse(rp) -} - -// GetBotIconImage gets LHS bot icon image. Must be logged in. -func (c *Client4) GetBotIconImage(botUserId string) ([]byte, *Response) { - r, appErr := c.DoApiGet(c.GetBotRoute(botUserId)+"/icon", "") - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - - data, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, BuildErrorResponse(r, NewAppError("GetBotIconImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) - } - return data, BuildResponse(r) -} - -// DeleteBotIconImage deletes LHS bot icon image. Must be logged in. -func (c *Client4) DeleteBotIconImage(botUserId string) (bool, *Response) { - r, appErr := c.DoApiDelete(c.GetBotRoute(botUserId) + "/icon") - if appErr != nil { - return false, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// Team Section - -// CreateTeam creates a team in the system based on the provided team struct. -func (c *Client4) CreateTeam(team *Team) (*Team, *Response) { - r, err := c.DoApiPost(c.GetTeamsRoute(), team.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TeamFromJson(r.Body), BuildResponse(r) -} - -// GetTeam returns a team based on the provided team id string. -func (c *Client4) GetTeam(teamId, etag string) (*Team, *Response) { - r, err := c.DoApiGet(c.GetTeamRoute(teamId), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TeamFromJson(r.Body), BuildResponse(r) -} - -// GetAllTeams returns all teams based on permissions. 
-func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetAllTeamsWithTotalCount returns all teams based on permissions.
-func (c *Client4) GetAllTeamsWithTotalCount(etag string, page int, perPage int) ([]*Team, int64, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
-	r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag)
-	if err != nil {
-		return nil, 0, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	teamsListWithCount := TeamsWithCountFromJson(r.Body)
-	return teamsListWithCount.Teams, teamsListWithCount.TotalCount, BuildResponse(r)
-}
-
-// GetTeamByName returns a team based on the provided team name string.
-func (c *Client4) GetTeamByName(name, etag string) (*Team, *Response) {
-	r, err := c.DoApiGet(c.GetTeamByNameRoute(name), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchTeams returns teams matching the provided search term.
-func (c *Client4) SearchTeams(search *TeamSearch) ([]*Team, *Response) {
-	r, err := c.DoApiPost(c.GetTeamsRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamListFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchTeamsPaged returns a page of teams and the total count matching the provided search term.
-func (c *Client4) SearchTeamsPaged(search *TeamSearch) ([]*Team, int64, *Response) {
-	if search.Page == nil {
-		search.Page = NewInt(0)
-	}
-	if search.PerPage == nil {
-		search.PerPage = NewInt(100)
-	}
-	r, err := c.DoApiPost(c.GetTeamsRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, 0, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	twc := TeamsWithCountFromJson(r.Body)
-	return twc.Teams, twc.TotalCount, BuildResponse(r)
-}
-
-// TeamExists returns whether a team with the given name exists.
-func (c *Client4) TeamExists(name, etag string) (bool, *Response) {
-	r, err := c.DoApiGet(c.GetTeamByNameRoute(name)+"/exists", etag)
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapBoolFromJson(r.Body)["exists"], BuildResponse(r)
-}
-
-// GetTeamsForUser returns a list of teams a user is on. Must be logged in as the user
-// or be a system administrator.
-func (c *Client4) GetTeamsForUser(userId, etag string) ([]*Team, *Response) {
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamMember returns a team member based on the provided team and user id strings.
-func (c *Client4) GetTeamMember(teamId, userId, etag string) (*TeamMember, *Response) {
-	r, err := c.DoApiGet(c.GetTeamMemberRoute(teamId, userId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateTeamMemberRoles will update the roles on a team for a user.
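// Illustrative sketch: a paged team search; as the code above shows, Page and PerPage
// default to 0 and 100 inside SearchTeamsPaged when left nil.
// Assumes: imports "fmt" and "github.com/mattermost/mattermost-server/v5/model"
func exampleSearchTeams(client *model.Client4, term string) error {
	teams, total, resp := client.SearchTeamsPaged(&model.TeamSearch{Term: term})
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Printf("first page holds %d of %d matching teams\n", len(teams), total)
	return nil
}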
-func (c *Client4) UpdateTeamMemberRoles(teamId, userId, newRoles string) (bool, *Response) {
-	requestBody := map[string]string{"roles": newRoles}
-	r, err := c.DoApiPut(c.GetTeamMemberRoute(teamId, userId)+"/roles", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateTeamMemberSchemeRoles will update the scheme-derived roles on a team for a user.
-func (c *Client4) UpdateTeamMemberSchemeRoles(teamId string, userId string, schemeRoles *SchemeRoles) (bool, *Response) {
-	r, err := c.DoApiPut(c.GetTeamMemberRoute(teamId, userId)+"/schemeRoles", schemeRoles.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateTeam will update a team.
-func (c *Client4) UpdateTeam(team *Team) (*Team, *Response) {
-	r, err := c.DoApiPut(c.GetTeamRoute(team.Id), team.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// PatchTeam partially updates a team. Any missing fields are not updated.
-func (c *Client4) PatchTeam(teamId string, patch *TeamPatch) (*Team, *Response) {
-	r, err := c.DoApiPut(c.GetTeamRoute(teamId)+"/patch", patch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// RestoreTeam restores a previously deleted team.
-func (c *Client4) RestoreTeam(teamId string) (*Team, *Response) {
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/restore", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// RegenerateTeamInviteId requests a new invite ID to be generated.
-func (c *Client4) RegenerateTeamInviteId(teamId string) (*Team, *Response) {
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/regenerate_invite_id", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// SoftDeleteTeam deletes the team softly (archive only, not permanent delete).
-func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetTeamRoute(teamId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// PermanentDeleteTeam deletes the team permanently; it should only be used when needed for
-// compliance and the like.
-func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=" + c.boolString(true))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateTeamPrivacy modifies the team type (model.TEAM_OPEN <--> model.TEAM_INVITE) and sets
-// the corresponding AllowOpenInvite appropriately.
-func (c *Client4) UpdateTeamPrivacy(teamId string, privacy string) (*Team, *Response) {
-	requestBody := map[string]string{"privacy": privacy}
-	r, err := c.DoApiPut(c.GetTeamRoute(teamId)+"/privacy", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamMembers returns team members based on the provided team id string.
-func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag string) ([]*TeamMember, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetTeamMembersRoute(teamId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamMembersSortAndWithoutDeletedUsers returns team members based on the provided team id string,
-// accepting the additional sort and exclude_deleted_users parameters. These could not be added to
-// GetTeamMembers above without a breaking change.
-func (c *Client4) GetTeamMembersSortAndWithoutDeletedUsers(teamId string, page int, perPage int, sort string, exclude_deleted_users bool, etag string) ([]*TeamMember, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v&exclude_deleted_users=%v", page, perPage, sort, exclude_deleted_users)
-	r, err := c.DoApiGet(c.GetTeamMembersRoute(teamId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamMembersForUser returns the team members for a user.
-func (c *Client4) GetTeamMembersForUser(userId string, etag string) ([]*TeamMember, *Response) {
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/members", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamMembersByIds will return an array of team members based on the
-// team id and a list of user ids provided. Must be authenticated.
-func (c *Client4) GetTeamMembersByIds(teamId string, userIds []string) ([]*TeamMember, *Response) {
-	r, err := c.DoApiPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// AddTeamMember adds a user to a team and returns a team member.
-func (c *Client4) AddTeamMember(teamId, userId string) (*TeamMember, *Response) {
-	member := &TeamMember{TeamId: teamId, UserId: userId}
-	r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId), member.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// AddTeamMemberFromInvite adds a user to a team and returns a team member, using an invite id
-// or an invite token/data pair.
-func (c *Client4) AddTeamMemberFromInvite(token, inviteId string) (*TeamMember, *Response) {
-	var query string
-
-	if inviteId != "" {
-		query += fmt.Sprintf("?invite_id=%v", inviteId)
-	}
-
-	if token != "" {
-		query += fmt.Sprintf("?token=%v", token)
-	}
-
-	r, err := c.DoApiPost(c.GetTeamsRoute()+"/members/invite"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// AddTeamMembers adds a number of users to a team and returns the team members.
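// Illustrative sketch: adding a user to a team with AddTeamMember and reading the
// membership back with GetTeamMember; both ids are placeholders.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleJoinTeam(client *model.Client4, teamId, userId string) (*model.TeamMember, error) {
	if _, resp := client.AddTeamMember(teamId, userId); resp.Error != nil {
		return nil, resp.Error
	}
	member, resp := client.GetTeamMember(teamId, userId, "")
	if resp.Error != nil {
		return nil, resp.Error
	}
	return member, nil
}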
-func (c *Client4) AddTeamMembers(teamId string, userIds []string) ([]*TeamMember, *Response) {
-	var members []*TeamMember
-	for _, userId := range userIds {
-		member := &TeamMember{TeamId: teamId, UserId: userId}
-		members = append(members, member)
-	}
-
-	r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch", TeamMembersToJson(members))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// AddTeamMembersGracefully adds a number of users to a team and returns the team members,
-// reporting any per-member errors instead of failing the whole batch.
-func (c *Client4) AddTeamMembersGracefully(teamId string, userIds []string) ([]*TeamMemberWithError, *Response) {
-	var members []*TeamMember
-	for _, userId := range userIds {
-		member := &TeamMember{TeamId: teamId, UserId: userId}
-		members = append(members, member)
-	}
-
-	r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful="+c.boolString(true), TeamMembersToJson(members))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamMembersWithErrorFromJson(r.Body), BuildResponse(r)
-}
-
-// RemoveTeamMember will remove a user from a team.
-func (c *Client4) RemoveTeamMember(teamId, userId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetTeamMemberRoute(teamId, userId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetTeamStats returns team stats based on the team id string.
-// Must be authenticated.
-func (c *Client4) GetTeamStats(teamId, etag string) (*TeamStats, *Response) {
-	r, err := c.DoApiGet(c.GetTeamStatsRoute(teamId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamStatsFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTotalUsersStats returns the total system user stats.
-// Must be authenticated.
-func (c *Client4) GetTotalUsersStats(etag string) (*UsersStats, *Response) {
-	r, err := c.DoApiGet(c.GetTotalUsersStatsRoute(), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return UsersStatsFromJson(r.Body), BuildResponse(r)
-}
-
-// GetTeamUnread will return a TeamUnread object that contains the amount of
-// unread messages and mentions the user has for the specified team.
-// Must be authenticated.
-func (c *Client4) GetTeamUnread(teamId, userId string) (*TeamUnread, *Response) {
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/unread", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return TeamUnreadFromJson(r.Body), BuildResponse(r)
-}
-
-// ImportTeam will import an exported team from another app into an existing team.
-func (c *Client4) ImportTeam(data []byte, filesize int, importFrom, filename, teamId string) (map[string]string, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormFile("file", filename)
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	part, err = writer.CreateFormField("filesize")
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err = io.Copy(part, strings.NewReader(strconv.Itoa(filesize))); err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	part, err = writer.CreateFormField("importFrom")
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err := io.Copy(part, strings.NewReader(importFrom)); err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if err := writer.Close(); err != nil {
-		return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	return c.DoUploadImportTeam(c.GetTeamImportRoute(teamId), body.Bytes(), writer.FormDataContentType())
-}
-
-// InviteUsersToTeam invites users by email to the team.
-func (c *Client4) InviteUsersToTeam(teamId string, userEmails []string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email", ArrayToJson(userEmails))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// InviteGuestsToTeam invites guests by email to some channels in a team.
-func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channels []string, message string) (bool, *Response) {
-	guestsInvite := GuestsInvite{
-		Emails:   userEmails,
-		Channels: channels,
-		Message:  message,
-	}
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email", guestsInvite.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// InviteUsersToTeamGracefully invites users by email to the team, returning any
-// per-email errors instead of failing the whole batch.
-func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response) {
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJson(userEmails))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmailInviteWithErrorFromJson(r.Body), BuildResponse(r)
-}
-
-// InviteGuestsToTeamGracefully invites guests by email to some channels in a team.
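// Illustrative sketch: inviting by email with the graceful variant above, so one bad
// address surfaces as a per-invite error instead of failing the whole batch.
// Assumes: imports "fmt" and "github.com/mattermost/mattermost-server/v5/model"
func exampleInviteBatch(client *model.Client4, teamId string, emails []string) error {
	invites, resp := client.InviteUsersToTeamGracefully(teamId, emails)
	if resp.Error != nil {
		return resp.Error
	}
	for _, invite := range invites {
		if invite.Error != nil {
			fmt.Println("invite failed for", invite.Email)
		}
	}
	return nil
}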
-func (c *Client4) InviteGuestsToTeamGracefully(teamId string, userEmails []string, channels []string, message string) ([]*EmailInviteWithError, *Response) { - guestsInvite := GuestsInvite{ - Emails: userEmails, - Channels: channels, - Message: message, - } - r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful="+c.boolString(true), guestsInvite.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return EmailInviteWithErrorFromJson(r.Body), BuildResponse(r) -} - -// InvalidateEmailInvites will invalidate active email invitations that have not been accepted by the user. -func (c *Client4) InvalidateEmailInvites() (bool, *Response) { - r, err := c.DoApiDelete(c.GetTeamsRoute() + "/invites/email") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// GetTeamInviteInfo returns a team object from an invite id containing sanitized information. -func (c *Client4) GetTeamInviteInfo(inviteId string) (*Team, *Response) { - r, err := c.DoApiGet(c.GetTeamsRoute()+"/invite/"+inviteId, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TeamFromJson(r.Body), BuildResponse(r) -} - -// SetTeamIcon sets team icon of the team. -func (c *Client4) SetTeamIcon(teamId string, data []byte) (bool, *Response) { - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - - part, err := writer.CreateFormFile("image", "teamIcon.png") - if err != nil { - return false, &Response{Error: NewAppError("SetTeamIcon", "model.client.set_team_icon.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { - return false, &Response{Error: NewAppError("SetTeamIcon", "model.client.set_team_icon.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - if err = writer.Close(); err != nil { - return false, &Response{Error: NewAppError("SetTeamIcon", "model.client.set_team_icon.writer.app_error", nil, err.Error(), http.StatusBadRequest)} - } - - rq, err := http.NewRequest("POST", c.ApiUrl+c.GetTeamRoute(teamId)+"/image", bytes.NewReader(body.Bytes())) - if err != nil { - return false, &Response{Error: NewAppError("SetTeamIcon", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", writer.FormDataContentType()) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - // set to http.StatusForbidden(403) - return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetTeamRoute(teamId)+"/image", "model.client.connecting.app_error", nil, err.Error(), 403)} - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return CheckStatusOK(rp), BuildResponse(rp) -} - -// GetTeamIcon gets the team icon of the team. 
-func (c *Client4) GetTeamIcon(teamId, etag string) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetTeamRoute(teamId)+"/image", etag)
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetTeamIcon", "model.client.get_team_icon.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// RemoveTeamIcon updates LastTeamIconUpdate to 0 which indicates the team icon is removed.
-func (c *Client4) RemoveTeamIcon(teamId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "/image")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// Channel Section
-
-// GetAllChannels gets all the channels. Must be a system administrator.
-func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) {
-	return c.getAllChannels(page, perPage, etag, false)
-}
-
-// GetAllChannelsIncludeDeleted gets all the channels, including deleted ones. Must be a system administrator.
-func (c *Client4) GetAllChannelsIncludeDeleted(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) {
-	return c.getAllChannels(page, perPage, etag, true)
-}
-
-func (c *Client4) getAllChannels(page int, perPage int, etag string, includeDeleted bool) (*ChannelListWithTeamData, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=%v", page, perPage, includeDeleted)
-	r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelListWithTeamDataFromJson(r.Body), BuildResponse(r)
-}
-
-// GetAllChannelsWithCount gets all the channels including the total count. Must be a system administrator.
-func (c *Client4) GetAllChannelsWithCount(page int, perPage int, etag string) (*ChannelListWithTeamData, int64, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
-	r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag)
-	if err != nil {
-		return nil, 0, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	cwc := ChannelsWithCountFromJson(r.Body)
-	return cwc.Channels, cwc.TotalCount, BuildResponse(r)
-}
-
-// CreateChannel creates a channel based on the provided channel struct.
-func (c *Client4) CreateChannel(channel *Channel) (*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsRoute(), channel.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateChannel updates a channel based on the provided channel struct.
-func (c *Client4) UpdateChannel(channel *Channel) (*Channel, *Response) {
-	r, err := c.DoApiPut(c.GetChannelRoute(channel.Id), channel.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// PatchChannel partially updates a channel. Any missing fields are not updated.
-func (c *Client4) PatchChannel(channelId string, patch *ChannelPatch) (*Channel, *Response) {
-	r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/patch", patch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// ConvertChannelToPrivate converts a public channel to a private channel.
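// Illustrative sketch: creating an open (public) channel with CreateChannel above;
// CHANNEL_OPEN is the v5 constant for public channels, and the team id, name, and
// display name are placeholders.
// Assumes: import "github.com/mattermost/mattermost-server/v5/model"
func exampleCreateChannel(client *model.Client4, teamId string) (*model.Channel, error) {
	channel, resp := client.CreateChannel(&model.Channel{
		TeamId:      teamId,
		Name:        "build-status",
		DisplayName: "Build Status",
		Type:        model.CHANNEL_OPEN,
	})
	if resp.Error != nil {
		return nil, resp.Error
	}
	return channel, nil
}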
-func (c *Client4) ConvertChannelToPrivate(channelId string) (*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/convert", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateChannelPrivacy updates channel privacy.
-func (c *Client4) UpdateChannelPrivacy(channelId string, privacy string) (*Channel, *Response) {
-	requestBody := map[string]string{"privacy": privacy}
-	r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/privacy", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// RestoreChannel restores a previously deleted channel.
-func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/restore", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// CreateDirectChannel creates a direct message channel based on the two user
-// ids provided.
-func (c *Client4) CreateDirectChannel(userId1, userId2 string) (*Channel, *Response) {
-	requestBody := []string{userId1, userId2}
-	r, err := c.DoApiPost(c.GetChannelsRoute()+"/direct", ArrayToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// CreateGroupChannel creates a group message channel based on the userIds provided.
-func (c *Client4) CreateGroupChannel(userIds []string) (*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsRoute()+"/group", ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannel returns a channel based on the provided channel id string.
-func (c *Client4) GetChannel(channelId, etag string) (*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelStats returns statistics for a channel.
-func (c *Client4) GetChannelStats(channelId string, etag string) (*ChannelStats, *Response) {
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/stats", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelStatsFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelMembersTimezones gets a list of timezones for a channel.
-func (c *Client4) GetChannelMembersTimezones(channelId string) ([]string, *Response) {
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/timezones", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ArrayFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPinnedPosts gets a list of pinned posts.
-func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Response) {
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/pinned", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPrivateChannelsForTeam returns a list of private channels based on the provided team id string.
-func (c *Client4) GetPrivateChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
-	query := fmt.Sprintf("/private?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPublicChannelsForTeam returns a list of public channels based on the provided team id string.
-func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetDeletedChannelsForTeam returns a list of deleted channels based on the provided team id string.
-func (c *Client4) GetDeletedChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
-	query := fmt.Sprintf("/deleted?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPublicChannelsByIdsForTeam returns a list of public channels based on the provided team id and channel id strings.
-func (c *Client4) GetPublicChannelsByIdsForTeam(teamId string, channelIds []string) ([]*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/ids", ArrayToJson(channelIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelsForTeamForUser returns a list of channels on a team for a user.
-func (c *Client4) GetChannelsForTeamForUser(teamId, userId string, includeDeleted bool, etag string) ([]*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelsForTeamForUserRoute(teamId, userId, includeDeleted), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchChannels returns the channels on a team matching the provided search term.
-func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchArchivedChannels returns the archived channels on a team matching the provided search term.
-func (c *Client4) SearchArchivedChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search_archived", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
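A minimal sketch of driving the team-scoped channel search above, assuming the authenticated client from the earlier sketch; the search term is an arbitrary example:

func findChannels(client *model.Client4, teamId string) ([]*model.Channel, error) {
	// ChannelSearch carries the user-supplied term; "town" is a hypothetical query.
	channels, resp := client.SearchChannels(teamId, &model.ChannelSearch{Term: "town"})
	if resp.Error != nil {
		return nil, resp.Error
	}
	return channels, nil
}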
-
-// SearchAllChannels searches all the channels. Must be a system administrator.
-func (c *Client4) SearchAllChannels(search *ChannelSearch) (*ChannelListWithTeamData, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelListWithTeamDataFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchAllChannelsPaged searches all the channels and returns the results paged with the total count.
-func (c *Client4) SearchAllChannelsPaged(search *ChannelSearch) (*ChannelsWithCount, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelsWithCountFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchGroupChannels returns the group channels of the user whose members' usernames match the search term.
-func (c *Client4) SearchGroupChannels(search *ChannelSearch) ([]*Channel, *Response) {
-	r, err := c.DoApiPost(c.GetChannelsRoute()+"/group/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelSliceFromJson(r.Body), BuildResponse(r)
-}
-
-// DeleteChannel deletes the channel based on the provided channel id string.
-func (c *Client4) DeleteChannel(channelId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetChannelRoute(channelId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// MoveChannel moves the channel to the destination team.
-func (c *Client4) MoveChannel(channelId, teamId string, force bool) (*Channel, *Response) {
-	requestBody := map[string]interface{}{
-		"team_id": teamId,
-		"force":   force,
-	}
-	r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/move", StringInterfaceToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelByName returns a channel based on the provided channel name and team id strings.
-func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelByNameIncludeDeleted returns a channel based on the provided channel name and team id strings. Unlike GetChannelByName, it will also return deleted channels.
-func (c *Client4) GetChannelByNameIncludeDeleted(channelName, teamId string, etag string) (*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted="+c.boolString(true), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelByNameForTeamName returns a channel based on the provided channel name and team name strings.
-func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag string) (*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelByNameForTeamNameIncludeDeleted returns a channel based on the provided channel name and team name strings. Unlike GetChannelByNameForTeamName, it will also return deleted channels.
-func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamName string, etag string) (*Channel, *Response) {
-	r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted="+c.boolString(true), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelMembers gets a page of channel members.
-func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag string) (*ChannelMembers, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetChannelMembersRoute(channelId)+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelMembersByIds gets the channel members in a channel for a list of user ids.
-func (c *Client4) GetChannelMembersByIds(channelId string, userIds []string) (*ChannelMembers, *Response) {
-	r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"/ids", ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelMember gets a channel member.
-func (c *Client4) GetChannelMember(channelId, userId, etag string) (*ChannelMember, *Response) {
-	r, err := c.DoApiGet(c.GetChannelMemberRoute(channelId, userId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelMembersForUser gets all the channel members for a user on a team.
-func (c *Client4) GetChannelMembersForUser(userId, teamId, etag string) (*ChannelMembers, *Response) {
-	r, err := c.DoApiGet(fmt.Sprintf(c.GetUserRoute(userId)+"/teams/%v/channels/members", teamId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMembersFromJson(r.Body), BuildResponse(r)
-}
-
-// ViewChannel performs a view action for a user. Synonymous with switching channels or marking channels as read by a user.
-func (c *Client4) ViewChannel(userId string, view *ChannelView) (*ChannelViewResponse, *Response) {
-	url := fmt.Sprintf(c.GetChannelsRoute()+"/members/%v/view", userId)
-	r, err := c.DoApiPost(url, view.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelViewResponseFromJson(r.Body), BuildResponse(r)
-}
-
-// GetChannelUnread will return a ChannelUnread object that contains the number of
-// unread messages and mentions for a user.
-func (c *Client4) GetChannelUnread(channelId, userId string) (*ChannelUnread, *Response) {
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/unread", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelUnreadFromJson(r.Body), BuildResponse(r)
-}
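A sketch of the typical read-state flow built from ViewChannel and GetChannelUnread, assuming the authenticated client (and "fmt" import) from the first sketch; ids are hypothetical:

func markReadAndReport(client *model.Client4, userId, channelId string) error {
	// Viewing a channel is how clients mark it read; PrevChannelId may be left empty.
	view := &model.ChannelView{ChannelId: channelId}
	if _, resp := client.ViewChannel(userId, view); resp.Error != nil {
		return resp.Error
	}
	unread, resp := client.GetChannelUnread(channelId, userId)
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Printf("unread=%d mentions=%d\n", unread.MsgCount, unread.MentionCount)
	return nil
}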
-
-// UpdateChannelRoles will update the roles on a channel for a user.
-func (c *Client4) UpdateChannelRoles(channelId, userId, roles string) (bool, *Response) {
-	requestBody := map[string]string{"roles": roles}
-	r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/roles", MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateChannelMemberSchemeRoles will update the scheme-derived roles on a channel for a user.
-func (c *Client4) UpdateChannelMemberSchemeRoles(channelId string, userId string, schemeRoles *SchemeRoles) (bool, *Response) {
-	r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/schemeRoles", schemeRoles.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateChannelNotifyProps will update the notification properties on a channel for a user.
-func (c *Client4) UpdateChannelNotifyProps(channelId, userId string, props map[string]string) (bool, *Response) {
-	r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/notify_props", MapToJson(props))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// AddChannelMember adds a user to a channel and returns a channel member.
-func (c *Client4) AddChannelMember(channelId, userId string) (*ChannelMember, *Response) {
-	requestBody := map[string]string{"user_id": userId}
-	r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// AddChannelMemberWithRootId adds a user to a channel and returns a channel member. The "added to channel" system post is rooted at postRootId.
-func (c *Client4) AddChannelMemberWithRootId(channelId, userId, postRootId string) (*ChannelMember, *Response) {
-	requestBody := map[string]string{"user_id": userId, "post_root_id": postRootId}
-	r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelMemberFromJson(r.Body), BuildResponse(r)
-}
-
-// RemoveUserFromChannel will delete the channel member object for a user, effectively removing the user from a channel.
-func (c *Client4) RemoveUserFromChannel(channelId, userId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetChannelMemberRoute(channelId, userId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// AutocompleteChannelsForTeam will return an ordered list of channels autocomplete suggestions.
-func (c *Client4) AutocompleteChannelsForTeam(teamId, name string) (*ChannelList, *Response) {
-	query := fmt.Sprintf("?name=%v", name)
-	r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+"/autocomplete"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelListFromJson(r.Body), BuildResponse(r)
-}
-
-// AutocompleteChannelsForTeamForSearch will return an ordered list of your channels autocomplete suggestions.
-func (c *Client4) AutocompleteChannelsForTeamForSearch(teamId, name string) (*ChannelList, *Response) {
-	query := fmt.Sprintf("?name=%v", name)
-	r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+"/search_autocomplete"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ChannelListFromJson(r.Body), BuildResponse(r)
-}
-
-// Post Section
-
-// CreatePost creates a post based on the provided post struct.
-func (c *Client4) CreatePost(post *Post) (*Post, *Response) {
-	r, err := c.DoApiPost(c.GetPostsRoute(), post.ToUnsanitizedJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostFromJson(r.Body), BuildResponse(r)
-}
-
-// CreatePostEphemeral creates an ephemeral post based on the provided post struct, which is sent to the given user id.
-func (c *Client4) CreatePostEphemeral(post *PostEphemeral) (*Post, *Response) {
-	r, err := c.DoApiPost(c.GetPostsEphemeralRoute(), post.ToUnsanitizedJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdatePost updates a post based on the provided post struct.
-func (c *Client4) UpdatePost(postId string, post *Post) (*Post, *Response) {
-	r, err := c.DoApiPut(c.GetPostRoute(postId), post.ToUnsanitizedJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostFromJson(r.Body), BuildResponse(r)
-}
-
-// PatchPost partially updates a post. Any missing fields are not updated.
-func (c *Client4) PatchPost(postId string, patch *PostPatch) (*Post, *Response) {
-	r, err := c.DoApiPut(c.GetPostRoute(postId)+"/patch", patch.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostFromJson(r.Body), BuildResponse(r)
-}
-
-// SetPostUnread marks the channel where the post belongs as unread from the time of the provided post.
-func (c *Client4) SetPostUnread(userId string, postId string) *Response {
-	r, err := c.DoApiPost(c.GetUserRoute(userId)+c.GetPostRoute(postId)+"/set_unread", "")
-	if err != nil {
-		return BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return BuildResponse(r)
-}
-
-// PinPost pins a post based on the provided post id string.
-func (c *Client4) PinPost(postId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetPostRoute(postId)+"/pin", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UnpinPost unpins a post based on the provided post id string.
-func (c *Client4) UnpinPost(postId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetPostRoute(postId)+"/unpin", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetPost gets a single post.
-func (c *Client4) GetPost(postId string, etag string) (*Post, *Response) {
-	r, err := c.DoApiGet(c.GetPostRoute(postId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostFromJson(r.Body), BuildResponse(r)
-}
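A sketch of creating a post and a threaded reply with this API, assuming the authenticated client from the first sketch; the channel id and messages are hypothetical:

func postAndReply(client *model.Client4, channelId string) error {
	root, resp := client.CreatePost(&model.Post{ChannelId: channelId, Message: "deploy finished"})
	if resp.Error != nil {
		return resp.Error
	}
	// Replies join a thread by pointing RootId at the thread's root post.
	if _, resp = client.CreatePost(&model.Post{ChannelId: channelId, Message: "logs attached", RootId: root.Id}); resp.Error != nil {
		return resp.Error
	}
	return nil
}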
-
-// DeletePost deletes a post based on the provided post id string.
-func (c *Client4) DeletePost(postId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetPostRoute(postId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetPostThread gets a post with all the other posts in the same thread.
-func (c *Client4) GetPostThread(postId string, etag string) (*PostList, *Response) {
-	r, err := c.DoApiGet(c.GetPostRoute(postId)+"/thread", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPostsForChannel gets a page of posts for a channel, along with an array specifying the ordering.
-func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string) (*PostList, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetFlaggedPostsForUser returns the flagged posts of a user based on the user id string.
-func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) (*PostList, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetFlaggedPostsForUserInTeam returns the flagged posts of a user in a team, based on the user id string.
-func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, page int, perPage int) (*PostList, *Response) {
-	if !IsValidId(teamId) {
-		return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInTeam", "model.client.get_flagged_posts_in_team.missing_parameter.app_error", nil, "", http.StatusBadRequest)}
-	}
-
-	query := fmt.Sprintf("?team_id=%v&page=%v&per_page=%v", teamId, page, perPage)
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetFlaggedPostsForUserInChannel returns the flagged posts of a user in a channel, based on the user id string.
-func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId string, page int, perPage int) (*PostList, *Response) {
-	if !IsValidId(channelId) {
-		return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInChannel", "model.client.get_flagged_posts_in_channel.missing_parameter.app_error", nil, "", http.StatusBadRequest)}
-	}
-
-	query := fmt.Sprintf("?channel_id=%v&page=%v&per_page=%v", channelId, page, perPage)
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPostsSince gets posts created after a specified time as Unix time in milliseconds.
-func (c *Client4) GetPostsSince(channelId string, time int64) (*PostList, *Response) {
-	query := fmt.Sprintf("?since=%v", time)
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPostsAfter gets a page of posts that were posted after the post provided.
-func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&after=%v", page, perPage, postId)
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPostsBefore gets a page of posts that were posted before the post provided.
-func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&before=%v", page, perPage, postId)
-	r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPostsAroundLastUnread gets a list of posts around the last unread post by a user in a channel.
-func (c *Client4) GetPostsAroundLastUnread(userId, channelId string, limitBefore, limitAfter int) (*PostList, *Response) {
-	query := fmt.Sprintf("?limit_before=%v&limit_after=%v", limitBefore, limitAfter)
-	r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/posts/unread"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchPosts returns any posts with matching terms string.
-func (c *Client4) SearchPosts(teamId string, terms string, isOrSearch bool) (*PostList, *Response) {
-	params := SearchParameter{
-		Terms:      &terms,
-		IsOrSearch: &isOrSearch,
-	}
-	return c.SearchPostsWithParams(teamId, &params)
-}
-
-// SearchPostsWithParams returns any posts with matching terms string.
-func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter) (*PostList, *Response) {
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/posts/search", params.SearchParameterToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostListFromJson(r.Body), BuildResponse(r)
-}
-
-// SearchPostsWithMatches returns any posts with matching terms string, including the matched terms for each post.
-func (c *Client4) SearchPostsWithMatches(teamId string, terms string, isOrSearch bool) (*PostSearchResults, *Response) {
-	requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch}
-	r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/posts/search", StringInterfaceToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PostSearchResultsFromJson(r.Body), BuildResponse(r)
-}
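A sketch of running a post search with explicit parameters, assuming the authenticated client (and "fmt" import) from the first sketch; the query string is a hypothetical example:

func searchIncidents(client *model.Client4, teamId string) (*model.PostList, error) {
	params := &model.SearchParameter{
		Terms:      model.NewString("incident in:town-square"),
		IsOrSearch: model.NewBool(false),
	}
	posts, resp := client.SearchPostsWithParams(teamId, params)
	if resp.Error != nil {
		return nil, resp.Error
	}
	// PostList.Order preserves the server-side ranking of the matches.
	for _, id := range posts.Order {
		fmt.Println(posts.Posts[id].Message)
	}
	return posts, nil
}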
-
-// DoPostAction performs a post action.
-func (c *Client4) DoPostAction(postId, actionId string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetPostRoute(postId)+"/actions/"+actionId, "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DoPostActionWithCookie performs a post action, passing the selected option and the action cookie as extra arguments.
-func (c *Client4) DoPostActionWithCookie(postId, actionId, selected, cookieStr string) (bool, *Response) {
-	var body []byte
-	if selected != "" || cookieStr != "" {
-		body, _ = json.Marshal(DoPostActionRequest{
-			SelectedOption: selected,
-			Cookie:         cookieStr,
-		})
-	}
-	r, err := c.DoApiPost(c.GetPostRoute(postId)+"/actions/"+actionId, string(body))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// OpenInteractiveDialog sends a WebSocket event to a user's clients to
-// open interactive dialogs, based on the provided trigger ID and other
-// provided data. Used with interactive message buttons, menus and
-// slash commands.
-func (c *Client4) OpenInteractiveDialog(request OpenDialogRequest) (bool, *Response) {
-	b, _ := json.Marshal(request)
-	r, err := c.DoApiPost("/actions/dialogs/open", string(b))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// SubmitInteractiveDialog will submit the provided dialog data to the integration
-// configured by the URL. Used with the interactive dialogs integration feature.
-func (c *Client4) SubmitInteractiveDialog(request SubmitDialogRequest) (*SubmitDialogResponse, *Response) {
-	b, _ := json.Marshal(request)
-	r, err := c.DoApiPost("/actions/dialogs/submit", string(b))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	var resp SubmitDialogResponse
-	json.NewDecoder(r.Body).Decode(&resp)
-	return &resp, BuildResponse(r)
-}
-
-// UploadFile will upload a file to a channel using a multipart request, to be later attached to a post.
-// This method is functionally equivalent to Client4.UploadFileAsRequestBody.
-func (c *Client4) UploadFile(data []byte, channelId string, filename string) (*FileUploadResponse, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormField("channel_id")
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	_, err = io.Copy(part, strings.NewReader(channelId))
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	part, err = writer.CreateFormFile("files", filename)
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-	_, err = io.Copy(part, bytes.NewBuffer(data))
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	err = writer.Close()
-	if err != nil {
-		return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	return c.DoUploadFile(c.GetFilesRoute(), body.Bytes(), writer.FormDataContentType())
-}
-
-// UploadFileAsRequestBody will upload a file to a channel as the body of a request, to be later attached
-// to a post. This method is functionally equivalent to Client4.UploadFile.
-func (c *Client4) UploadFileAsRequestBody(data []byte, channelId string, filename string) (*FileUploadResponse, *Response) {
-	return c.DoUploadFile(c.GetFilesRoute()+fmt.Sprintf("?channel_id=%v&filename=%v", url.QueryEscape(channelId), url.QueryEscape(filename)), data, http.DetectContentType(data))
-}
-
-// GetFile gets the bytes for a file by id.
-func (c *Client4) GetFile(fileId string) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId), "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// DownloadFile gets the bytes for a file by id, optionally adding headers to force the browser to download it.
-func (c *Client4) DownloadFile(fileId string, download bool) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("?download=%v", download), "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("DownloadFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
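A sketch of the usual upload-then-attach flow: files are uploaded first, and the returned file ids are attached to a new post. This assumes the authenticated client from the first sketch; the channel id, filename, and message are hypothetical:

func attachFile(client *model.Client4, channelId string, data []byte) error {
	upload, resp := client.UploadFile(data, channelId, "report.txt")
	if resp.Error != nil {
		return resp.Error
	}
	// Collect the ids of the uploaded files and attach them to a new post.
	var fileIds []string
	for _, info := range upload.FileInfos {
		fileIds = append(fileIds, info.Id)
	}
	if _, resp = client.CreatePost(&model.Post{ChannelId: channelId, Message: "weekly report", FileIds: fileIds}); resp.Error != nil {
		return resp.Error
	}
	return nil
}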
-
-// GetFileThumbnail gets the bytes for the thumbnail of a file by id.
-func (c *Client4) GetFileThumbnail(fileId string) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId)+"/thumbnail", "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// DownloadFileThumbnail gets the bytes for the thumbnail of a file by id, optionally adding headers to force the browser to download it.
-func (c *Client4) DownloadFileThumbnail(fileId string, download bool) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/thumbnail?download=%v", download), "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("DownloadFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// GetFileLink gets the public link of a file by id.
-func (c *Client4) GetFileLink(fileId string) (string, *Response) {
-	r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/link", "")
-	if err != nil {
-		return "", BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body)["link"], BuildResponse(r)
-}
-
-// GetFilePreview gets the bytes for the preview image of a file by id.
-func (c *Client4) GetFilePreview(fileId string) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId)+"/preview", "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// DownloadFilePreview gets the bytes for the preview image of a file by id, optionally adding headers to force the browser to download it.
-func (c *Client4) DownloadFilePreview(fileId string, download bool) ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/preview?download=%v", download), "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("DownloadFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-	return data, BuildResponse(r)
-}
-
-// GetFileInfo gets the file info object for a file by id.
-func (c *Client4) GetFileInfo(fileId string) (*FileInfo, *Response) {
-	r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/info", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return FileInfoFromJson(r.Body), BuildResponse(r)
-}
-
-// GetFileInfosForPost gets all the file info objects attached to a post.
-func (c *Client4) GetFileInfosForPost(postId string, etag string) ([]*FileInfo, *Response) {
-	r, err := c.DoApiGet(c.GetPostRoute(postId)+"/files/info", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return FileInfosFromJson(r.Body), BuildResponse(r)
-}
-
-// General/System Section
-
-// GetPing will return "OK" if the number of running goroutines is below the threshold, and "unhealthy" if it is above.
-func (c *Client4) GetPing() (string, *Response) {
-	r, err := c.DoApiGet(c.GetSystemRoute()+"/ping", "")
-	if r != nil && r.StatusCode == 500 {
-		defer r.Body.Close()
-		return STATUS_UNHEALTHY, BuildErrorResponse(r, err)
-	}
-	if err != nil {
-		return "", BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body)["status"], BuildResponse(r)
-}
-
-// GetPingWithServerStatus will return ok if several basic server health checks
-// all pass successfully.
-func (c *Client4) GetPingWithServerStatus() (string, *Response) {
-	r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status="+c.boolString(true), "")
-	if r != nil && r.StatusCode == 500 {
-		defer r.Body.Close()
-		return STATUS_UNHEALTHY, BuildErrorResponse(r, err)
-	}
-	if err != nil {
-		return "", BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body)["status"], BuildResponse(r)
-}
-
-// TestEmail will attempt to connect to the configured SMTP server.
-func (c *Client4) TestEmail(config *Config) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetTestEmailRoute(), config.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// TestSiteURL will test the validity of a site URL.
-func (c *Client4) TestSiteURL(siteURL string) (bool, *Response) {
-	requestBody := make(map[string]string)
-	requestBody["site_url"] = siteURL
-	r, err := c.DoApiPost(c.GetTestSiteURLRoute(), MapToJson(requestBody))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// TestS3Connection will attempt to connect to AWS S3 using the supplied configuration.
-func (c *Client4) TestS3Connection(config *Config) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetTestS3Route(), config.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetConfig will retrieve the server config with some sanitized items.
-func (c *Client4) GetConfig() (*Config, *Response) {
-	r, err := c.DoApiGet(c.GetConfigRoute(), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ConfigFromJson(r.Body), BuildResponse(r)
-}
-
-// ReloadConfig will reload the server configuration.
-func (c *Client4) ReloadConfig() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetConfigRoute()+"/reload", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetOldClientConfig will retrieve the parts of the server configuration needed by the
-// client, formatted in the old format.
-func (c *Client4) GetOldClientConfig(etag string) (map[string]string, *Response) {
-	r, err := c.DoApiGet(c.GetConfigRoute()+"/client?format=old", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body), BuildResponse(r)
-}
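A sketch of the health check this client exposed, assuming the authenticated client from the first sketch. GetPing reports the status string from the server, so a caller only needs to compare it against the OK constant:

func healthy(client *model.Client4) bool {
	// A healthy server answers model.STATUS_OK ("OK"); a 500 yields
	// model.STATUS_UNHEALTHY together with an error response.
	status, resp := client.GetPing()
	return resp.Error == nil && status == model.STATUS_OK
}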
-
-// GetEnvironmentConfig will retrieve a map mirroring the server configuration where fields
-// are set to true if the corresponding config setting is set through an environment variable.
-// Settings that haven't been set through environment variables will be missing from the map.
-func (c *Client4) GetEnvironmentConfig() (map[string]interface{}, *Response) {
-	r, err := c.DoApiGet(c.GetConfigRoute()+"/environment", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return StringInterfaceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOldClientLicense will retrieve the parts of the server license needed by the
-// client, formatted in the old format.
-func (c *Client4) GetOldClientLicense(etag string) (map[string]string, *Response) {
-	r, err := c.DoApiGet(c.GetLicenseRoute()+"/client?format=old", etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body), BuildResponse(r)
-}
-
-// DatabaseRecycle will recycle the database connections, discarding the current ones and acquiring new ones.
-func (c *Client4) DatabaseRecycle() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetDatabaseRoute()+"/recycle", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// InvalidateCaches will purge the cache and can affect performance while it is cleaning.
-func (c *Client4) InvalidateCaches() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetCacheRoute()+"/invalidate", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// UpdateConfig will update the server configuration.
-func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) {
-	r, err := c.DoApiPut(c.GetConfigRoute(), config.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ConfigFromJson(r.Body), BuildResponse(r)
-}
-
-// UploadLicenseFile will add a license file to the system.
-func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormFile("license", "test-license.mattermost-license")
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
-		return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if err = writer.Close(); err != nil {
-		return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	rq, err := http.NewRequest("POST", c.ApiUrl+c.GetLicenseRoute(), bytes.NewReader(body.Bytes()))
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-	rq.Header.Set("Content-Type", writer.FormDataContentType())
-
-	if len(c.AuthToken) > 0 {
-		rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
-	}
-
-	rp, err := c.HttpClient.Do(rq)
-	if err != nil || rp == nil {
-		return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetLicenseRoute(), "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
-	}
-	defer closeBody(rp)
-
-	if rp.StatusCode >= 300 {
-		return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
-	}
-
-	return CheckStatusOK(rp), BuildResponse(rp)
-}
-
-// RemoveLicenseFile will remove the server license if it exists. Note that this will
-// disable all enterprise features.
-func (c *Client4) RemoveLicenseFile() (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetLicenseRoute())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetAnalyticsOld will retrieve analytics using the old format. New format is not
-// available but the "/analytics" endpoint is reserved for it. The "name" argument is optional
-// and defaults to "standard". The "teamId" argument is optional and will limit results
-// to a specific team.
-func (c *Client4) GetAnalyticsOld(name, teamId string) (AnalyticsRows, *Response) {
-	query := fmt.Sprintf("?name=%v&team_id=%v", name, teamId)
-	r, err := c.DoApiGet(c.GetAnalyticsRoute()+"/old"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return AnalyticsRowsFromJson(r.Body), BuildResponse(r)
-}
-
-// Webhooks Section
-
-// CreateIncomingWebhook creates an incoming webhook for a channel.
-func (c *Client4) CreateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) {
-	r, err := c.DoApiPost(c.GetIncomingWebhooksRoute(), hook.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return IncomingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateIncomingWebhook updates an incoming webhook for a channel.
-func (c *Client4) UpdateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) {
-	r, err := c.DoApiPut(c.GetIncomingWebhookRoute(hook.Id), hook.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return IncomingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// GetIncomingWebhooks returns a page of incoming webhooks on the system. Page counting starts at 0.
-func (c *Client4) GetIncomingWebhooks(page int, perPage int, etag string) ([]*IncomingWebhook, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return IncomingWebhookListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetIncomingWebhooksForTeam returns a page of incoming webhooks for a team. Page counting starts at 0.
-func (c *Client4) GetIncomingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*IncomingWebhook, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
-	r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return IncomingWebhookListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetIncomingWebhook returns an incoming webhook given the hook ID.
-func (c *Client4) GetIncomingWebhook(hookID string, etag string) (*IncomingWebhook, *Response) {
-	r, err := c.DoApiGet(c.GetIncomingWebhookRoute(hookID), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return IncomingWebhookFromJson(r.Body), BuildResponse(r)
-}
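A sketch of creating an incoming webhook and firing a payload at it, assuming the authenticated client from the first sketch plus the standard "net/http" and "strings" imports. The display name and payload are hypothetical; payloads go to the unauthenticated /hooks/<id> endpoint the server exposes for incoming webhooks:

func createAndFire(client *model.Client4, serverURL, channelId string) error {
	hook, resp := client.CreateIncomingWebhook(&model.IncomingWebhook{
		ChannelId:   channelId,
		DisplayName: "alerts", // hypothetical name
	})
	if resp.Error != nil {
		return resp.Error
	}
	// Payloads are then POSTed as JSON to /hooks/<id> without authentication.
	_, err := http.Post(serverURL+"/hooks/"+hook.Id, "application/json",
		strings.NewReader(`{"text": "disk usage at 90%"}`))
	return err
}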
-
-// DeleteIncomingWebhook deletes an incoming webhook given the hook ID.
-func (c *Client4) DeleteIncomingWebhook(hookID string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetIncomingWebhookRoute(hookID))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// CreateOutgoingWebhook creates an outgoing webhook for a team or channel.
-func (c *Client4) CreateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) {
-	r, err := c.DoApiPost(c.GetOutgoingWebhooksRoute(), hook.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateOutgoingWebhook updates an outgoing webhook for a team or channel.
-func (c *Client4) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) {
-	r, err := c.DoApiPut(c.GetOutgoingWebhookRoute(hook.Id), hook.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOutgoingWebhooks returns a page of outgoing webhooks on the system. Page counting starts at 0.
-func (c *Client4) GetOutgoingWebhooks(page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOutgoingWebhook returns an outgoing webhook given the hook ID.
-func (c *Client4) GetOutgoingWebhook(hookId string) (*OutgoingWebhook, *Response) {
-	r, err := c.DoApiGet(c.GetOutgoingWebhookRoute(hookId), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOutgoingWebhooksForChannel returns a page of outgoing webhooks for a channel. Page counting starts at 0.
-func (c *Client4) GetOutgoingWebhooksForChannel(channelId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&channel_id=%v", page, perPage, channelId)
-	r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOutgoingWebhooksForTeam returns a page of outgoing webhooks for a team. Page counting starts at 0.
-func (c *Client4) GetOutgoingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
-	r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
-}
-
-// RegenOutgoingHookToken regenerates the outgoing webhook token.
-func (c *Client4) RegenOutgoingHookToken(hookId string) (*OutgoingWebhook, *Response) {
-	r, err := c.DoApiPost(c.GetOutgoingWebhookRoute(hookId)+"/regen_token", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
-}
-
-// DeleteOutgoingWebhook deletes the outgoing webhook with the given hook ID.
-func (c *Client4) DeleteOutgoingWebhook(hookId string) (bool, *Response) { - r, err := c.DoApiDelete(c.GetOutgoingWebhookRoute(hookId)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// Preferences Section - -// GetPreferences returns the user's preferences. -func (c *Client4) GetPreferences(userId string) (Preferences, *Response) { - r, err := c.DoApiGet(c.GetPreferencesRoute(userId), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - preferences, _ := PreferencesFromJson(r.Body) - return preferences, BuildResponse(r) -} - -// UpdatePreferences saves the user's preferences. -func (c *Client4) UpdatePreferences(userId string, preferences *Preferences) (bool, *Response) { - r, err := c.DoApiPut(c.GetPreferencesRoute(userId), preferences.ToJson()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return true, BuildResponse(r) -} - -// DeletePreferences deletes the user's preferences. -func (c *Client4) DeletePreferences(userId string, preferences *Preferences) (bool, *Response) { - r, err := c.DoApiPost(c.GetPreferencesRoute(userId)+"/delete", preferences.ToJson()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return true, BuildResponse(r) -} - -// GetPreferencesByCategory returns the user's preferences from the provided category string. -func (c *Client4) GetPreferencesByCategory(userId string, category string) (Preferences, *Response) { - url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s", category) - r, err := c.DoApiGet(url, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - preferences, _ := PreferencesFromJson(r.Body) - return preferences, BuildResponse(r) -} - -// GetPreferenceByCategoryAndName returns the user's preferences from the provided category and preference name string. -func (c *Client4) GetPreferenceByCategoryAndName(userId string, category string, preferenceName string) (*Preference, *Response) { - url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s/name/%v", category, preferenceName) - r, err := c.DoApiGet(url, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return PreferenceFromJson(r.Body), BuildResponse(r) -} - -// SAML Section - -// GetSamlMetadata returns metadata for the SAML configuration. -func (c *Client4) GetSamlMetadata() (string, *Response) { - r, err := c.DoApiGet(c.GetSamlRoute()+"/metadata", "") - if err != nil { - return "", BuildErrorResponse(r, err) - } - defer closeBody(r) - buf := new(bytes.Buffer) - _, _ = buf.ReadFrom(r.Body) - return buf.String(), BuildResponse(r) -} - -func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - - part, err := writer.CreateFormFile("certificate", filename) - if err != nil { - return nil, nil, err - } - - if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { - return nil, nil, err - } - - if err := writer.Close(); err != nil { - return nil, nil, err - } - - return body.Bytes(), writer, nil -} - -// UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it. -// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. 
-func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) {
-	body, writer, err := samlFileToMultipart(data, filename)
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	_, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/idp", body, writer.FormDataContentType())
-	return resp.Error == nil, resp
-}
-
-// UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it.
-// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk.
-func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) {
-	body, writer, err := samlFileToMultipart(data, filename)
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	_, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/public", body, writer.FormDataContentType())
-	return resp.Error == nil, resp
-}
-
-// UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it.
-// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk.
-func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) {
-	body, writer, err := samlFileToMultipart(data, filename)
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	_, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/private", body, writer.FormDataContentType())
-	return resp.Error == nil, resp
-}
-
-// DeleteSamlIdpCertificate deletes the SAML IDP certificate from the server and updates the config to not use it and disable SAML.
-func (c *Client4) DeleteSamlIdpCertificate() (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/idp")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DeleteSamlPublicCertificate deletes the SAML public certificate from the server and updates the config to not use it and disable SAML.
-func (c *Client4) DeleteSamlPublicCertificate() (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/public")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DeleteSamlPrivateCertificate deletes the SAML private key from the server and updates the config to not use it and disable SAML.
-func (c *Client4) DeleteSamlPrivateCertificate() (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/private")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetSamlCertificateStatus returns the status of the SAML certificates on the server.
-func (c *Client4) GetSamlCertificateStatus() (*SamlCertificateStatus, *Response) {
-	r, err := c.DoApiGet(c.GetSamlRoute()+"/certificate/status", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return SamlCertificateStatusFromJson(r.Body), BuildResponse(r)
-}
-
-func (c *Client4) GetSamlMetadataFromIdp(samlMetadataURL string) (*SamlMetadataResponse, *Response) {
-	requestBody := make(map[string]string)
-	requestBody["saml_metadata_url"] = samlMetadataURL
-	r, err := c.DoApiPost(c.GetSamlRoute()+"/metadatafromidp", MapToJson(requestBody))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-
-	defer closeBody(r)
-	return SamlMetadataResponseFromJson(r.Body), BuildResponse(r)
-}
-
-// Compliance Section
-
-// CreateComplianceReport creates a compliance report.
-func (c *Client4) CreateComplianceReport(report *Compliance) (*Compliance, *Response) {
-	r, err := c.DoApiPost(c.GetComplianceReportsRoute(), report.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ComplianceFromJson(r.Body), BuildResponse(r)
-}
-
-// GetComplianceReports returns a list of compliance reports.
-func (c *Client4) GetComplianceReports(page, perPage int) (Compliances, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetComplianceReportsRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CompliancesFromJson(r.Body), BuildResponse(r)
-}
-
-// GetComplianceReport returns a compliance report.
-func (c *Client4) GetComplianceReport(reportId string) (*Compliance, *Response) {
-	r, err := c.DoApiGet(c.GetComplianceReportRoute(reportId), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ComplianceFromJson(r.Body), BuildResponse(r)
-}
-
-// DownloadComplianceReport returns a full compliance report as a file.
-func (c *Client4) DownloadComplianceReport(reportId string) ([]byte, *Response) {
-	rq, err := http.NewRequest("GET", c.ApiUrl+c.GetComplianceReportRoute(reportId), nil)
-	if err != nil {
-		return nil, &Response{Error: NewAppError("DownloadComplianceReport", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if len(c.AuthToken) > 0 {
-		rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
-	}
-
-	rp, err := c.HttpClient.Do(rq)
-	if err != nil || rp == nil {
-		return nil, &Response{Error: NewAppError("DownloadComplianceReport", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-	defer closeBody(rp)
-
-	if rp.StatusCode >= 300 {
-		return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
-	}
-
-	data, err := ioutil.ReadAll(rp.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(rp, NewAppError("DownloadComplianceReport", "model.client.read_file.app_error", nil, err.Error(), rp.StatusCode))
-	}
-
-	return data, BuildResponse(rp)
-}
-
-// Cluster Section
-
-// GetClusterStatus returns the status of all the configured cluster nodes.
-func (c *Client4) GetClusterStatus() ([]*ClusterInfo, *Response) {
-	r, err := c.DoApiGet(c.GetClusterRoute()+"/status", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ClusterInfosFromJson(r.Body), BuildResponse(r)
-}
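A sketch of the compliance export flow above, assuming the authenticated client from the first sketch. The description and time window are hypothetical, and for brevity the asynchronous part is only noted in a comment:

func exportCompliance(client *model.Client4) ([]byte, error) {
	report, resp := client.CreateComplianceReport(&model.Compliance{
		Desc:    "quarterly export", // hypothetical description
		StartAt: model.GetMillis() - 90*24*60*60*1000,
		EndAt:   model.GetMillis(),
	})
	if resp.Error != nil {
		return nil, resp.Error
	}
	// The report is generated asynchronously; a real caller would poll
	// GetComplianceReport until the report finishes before downloading.
	data, resp := client.DownloadComplianceReport(report.Id)
	if resp.Error != nil {
		return nil, resp.Error
	}
	return data, nil
}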
-
-// LDAP Section
-
-// SyncLdap will force a sync with the configured LDAP server.
-func (c *Client4) SyncLdap() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetLdapRoute()+"/sync", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// TestLdap will attempt to connect to the configured LDAP server and return OK if configured
-// correctly.
-func (c *Client4) TestLdap() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetLdapRoute()+"/test", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetLdapGroups retrieves the list of LDAP groups.
-func (c *Client4) GetLdapGroups() ([]*Group, *Response) {
-	path := fmt.Sprintf("%s/groups", c.GetLdapRoute())
-
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	return GroupsFromJson(r.Body), BuildResponse(r)
-}
-
-// LinkLdapGroup creates or undeletes a Mattermost group and associates it to the given LDAP group DN.
-func (c *Client4) LinkLdapGroup(dn string) (*Group, *Response) {
-	path := fmt.Sprintf("%s/groups/%s/link", c.GetLdapRoute(), dn)
-
-	r, appErr := c.DoApiPost(path, "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	return GroupFromJson(r.Body), BuildResponse(r)
-}
-
-// UnlinkLdapGroup deletes the Mattermost group associated with the given LDAP group DN.
-func (c *Client4) UnlinkLdapGroup(dn string) (*Group, *Response) {
-	path := fmt.Sprintf("%s/groups/%s/link", c.GetLdapRoute(), dn)
-
-	r, appErr := c.DoApiDelete(path)
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	return GroupFromJson(r.Body), BuildResponse(r)
-}
-
-// MigrateIdLdap migrates the LDAP-enabled users to the given attribute.
-func (c *Client4) MigrateIdLdap(toAttribute string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetLdapRoute()+"/migrateid", MapToJson(map[string]string{
-		"toAttribute": toAttribute,
-	}))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetGroupsByChannel retrieves the Mattermost groups associated with a given channel.
-func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) {
-	path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetChannelRoute(channelId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference)
-	if opts.PageOpts != nil {
-		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
-	}
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, 0, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	responseData := struct {
-		Groups []*GroupWithSchemeAdmin `json:"groups"`
-		Count  int                     `json:"total_group_count"`
-	}{}
-	if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil {
-		appErr := NewAppError("Api4.GetGroupsByChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
-		return nil, 0, BuildErrorResponse(r, appErr)
-	}
-
-	return responseData.Groups, responseData.Count, BuildResponse(r)
-}
-
-// GetGroupsByTeam retrieves the Mattermost groups associated with a given team.
-func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) {
-	path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetTeamRoute(teamId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference)
fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetTeamRoute(teamId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference)
-	if opts.PageOpts != nil {
-		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
-	}
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, 0, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	responseData := struct {
-		Groups []*GroupWithSchemeAdmin `json:"groups"`
-		Count  int                     `json:"total_group_count"`
-	}{}
-	if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil {
-		appErr := NewAppError("Api4.GetGroupsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
-		return nil, 0, BuildErrorResponse(r, appErr)
-	}
-
-	return responseData.Groups, responseData.Count, BuildResponse(r)
-}
-
-// GetGroupsAssociatedToChannelsByTeam retrieves the Mattermost Groups associated with channels in a given team
-func (c *Client4) GetGroupsAssociatedToChannelsByTeam(teamId string, opts GroupSearchOpts) (map[string][]*GroupWithSchemeAdmin, *Response) {
-	path := fmt.Sprintf("%s/groups_by_channels?q=%v&filter_allow_reference=%v", c.GetTeamRoute(teamId), opts.Q, opts.FilterAllowReference)
-	if opts.PageOpts != nil {
-		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
-	}
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	responseData := struct {
-		GroupsAssociatedToChannels map[string][]*GroupWithSchemeAdmin `json:"groups"`
-	}{}
-	if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil {
-		appErr := NewAppError("Api4.GetGroupsAssociatedToChannelsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
-		return nil, BuildErrorResponse(r, appErr)
-	}
-
-	return responseData.GroupsAssociatedToChannels, BuildResponse(r)
-}
-
-// GetGroups retrieves Mattermost Groups
-func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response) {
-	path := fmt.Sprintf(
-		"%s?include_member_count=%v&not_associated_to_team=%v&not_associated_to_channel=%v&filter_allow_reference=%v&q=%v&filter_parent_team_permitted=%v",
-		c.GetGroupsRoute(),
-		opts.IncludeMemberCount,
-		opts.NotAssociatedToTeam,
-		opts.NotAssociatedToChannel,
-		opts.FilterAllowReference,
-		opts.Q,
-		opts.FilterParentTeamPermitted,
-	)
-	if opts.Since > 0 {
-		path = fmt.Sprintf("%s&since=%v", path, opts.Since)
-	}
-	if opts.PageOpts != nil {
-		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
-	}
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	return GroupsFromJson(r.Body), BuildResponse(r)
-}
-
-// GetGroupsByUserId retrieves Mattermost Groups for a user
-func (c *Client4) GetGroupsByUserId(userId string) ([]*Group, *Response) {
-	path := fmt.Sprintf(
-		"%s/%v/groups",
-		c.GetUsersRoute(),
-		userId,
-	)
-
-	r, appErr := c.DoApiGet(path, "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-	return GroupsFromJson(r.Body), BuildResponse(r)
-}
-
-// Audits Section
-
-// GetAudits returns a list of audits for the whole system.
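// A short sketch (not from the vendored source) of querying groups through
// the removed v5 GetGroups method, using only the GroupSearchOpts fields that
// appear in the query string above. The server URL and token are placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	opts := model.GroupSearchOpts{
		Q:                    "engineering", // free-text filter on group names
		IncludeMemberCount:   true,
		FilterAllowReference: true,
	}
	groups, resp := client.GetGroups(opts)
	if resp.Error != nil {
		fmt.Println("group search failed:", resp.Error)
		return
	}
	fmt.Printf("found %d groups\n", len(groups))
}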
-func (c *Client4) GetAudits(page int, perPage int, etag string) (Audits, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet("/audits"+query, etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return AuditsFromJson(r.Body), BuildResponse(r)
-}
-
-// Brand Section
-
-// GetBrandImage retrieves the previously uploaded brand image.
-func (c *Client4) GetBrandImage() ([]byte, *Response) {
-	r, appErr := c.DoApiGet(c.GetBrandRoute()+"/image", "")
-	if appErr != nil {
-		return nil, BuildErrorResponse(r, appErr)
-	}
-	defer closeBody(r)
-
-	if r.StatusCode >= 300 {
-		return nil, BuildErrorResponse(r, AppErrorFromJson(r.Body))
-	}
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetBrandImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-
-	return data, BuildResponse(r)
-}
-
-// DeleteBrandImage deletes the brand image for the system.
-func (c *Client4) DeleteBrandImage() *Response {
-	r, err := c.DoApiDelete(c.GetBrandRoute() + "/image")
-	if err != nil {
-		return BuildErrorResponse(r, err)
-	}
-	return BuildResponse(r)
-}
-
-// UploadBrandImage sets the brand image for the system.
-func (c *Client4) UploadBrandImage(data []byte) (bool, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormFile("image", "brand.png")
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
-		return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	if err = writer.Close(); err != nil {
-		return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-
-	rq, err := http.NewRequest("POST", c.ApiUrl+c.GetBrandRoute()+"/image", bytes.NewReader(body.Bytes()))
-	if err != nil {
-		return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
-	}
-	rq.Header.Set("Content-Type", writer.FormDataContentType())
-
-	if len(c.AuthToken) > 0 {
-		rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
-	}
-
-	rp, err := c.HttpClient.Do(rq)
-	if err != nil || rp == nil {
-		return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetBrandRoute()+"/image", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
-	}
-	defer closeBody(rp)
-
-	if rp.StatusCode >= 300 {
-		return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
-	}
-
-	return CheckStatusOK(rp), BuildResponse(rp)
-}
-
-// Logs Section
-
-// GetLogs gets a page of logs as a string array.
-func (c *Client4) GetLogs(page, perPage int) ([]string, *Response) {
-	query := fmt.Sprintf("?page=%v&logs_per_page=%v", page, perPage)
-	r, err := c.DoApiGet("/logs"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ArrayFromJson(r.Body), BuildResponse(r)
-}
-
-// PostLog is a convenience Web Service call so clients can log messages into
-// the server-side logs. For example, we typically log javascript error messages
-// into the server-side logs. It returns the log message if the logging was successful.
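// A sketch (not from the vendored source) of uploading a brand image with
// the removed v5 UploadBrandImage method above, which wraps the PNG bytes in
// a multipart form. The file path and token are placeholders.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	img, err := ioutil.ReadFile("brand.png") // assumed local PNG file
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	if ok, resp := client.UploadBrandImage(img); resp.Error != nil || !ok {
		fmt.Println("upload failed:", resp.Error)
	}
}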
-func (c *Client4) PostLog(message map[string]string) (map[string]string, *Response) {
-	r, err := c.DoApiPost("/logs", MapToJson(message))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body), BuildResponse(r)
-}
-
-// OAuth Section
-
-// CreateOAuthApp will register a new OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
-func (c *Client4) CreateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) {
-	r, err := c.DoApiPost(c.GetOAuthAppsRoute(), app.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateOAuthApp updates a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
-func (c *Client4) UpdateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) {
-	r, err := c.DoApiPut(c.GetOAuthAppRoute(app.Id), app.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOAuthApps gets a page of registered OAuth 2.0 client applications with Mattermost acting as an OAuth 2.0 service provider.
-func (c *Client4) GetOAuthApps(page, perPage int) ([]*OAuthApp, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetOAuthAppsRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOAuthApp gets a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
-func (c *Client4) GetOAuthApp(appId string) (*OAuthApp, *Response) {
-	r, err := c.DoApiGet(c.GetOAuthAppRoute(appId), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppFromJson(r.Body), BuildResponse(r)
-}
-
-// GetOAuthAppInfo gets a sanitized version of a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
-func (c *Client4) GetOAuthAppInfo(appId string) (*OAuthApp, *Response) {
-	r, err := c.DoApiGet(c.GetOAuthAppRoute(appId)+"/info", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppFromJson(r.Body), BuildResponse(r)
-}
-
-// DeleteOAuthApp deletes a registered OAuth 2.0 client application.
-func (c *Client4) DeleteOAuthApp(appId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetOAuthAppRoute(appId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// RegenerateOAuthAppSecret regenerates the client secret for a registered OAuth 2.0 client application.
-func (c *Client4) RegenerateOAuthAppSecret(appId string) (*OAuthApp, *Response) {
-	r, err := c.DoApiPost(c.GetOAuthAppRoute(appId)+"/regen_secret", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return OAuthAppFromJson(r.Body), BuildResponse(r)
-}
-
-// GetAuthorizedOAuthAppsForUser gets a page of OAuth 2.0 client applications the user has authorized to access their account.
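// A sketch (not from the vendored source) of paging through registered OAuth
// apps with the removed v5 GetOAuthApps and rotating one client secret. The
// server URL and token are placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	apps, resp := client.GetOAuthApps(0, 50) // first page, 50 apps per page
	if resp.Error != nil {
		fmt.Println("list failed:", resp.Error)
		return
	}
	fmt.Printf("%d registered apps\n", len(apps))

	if len(apps) > 0 {
		if _, resp := client.RegenerateOAuthAppSecret(apps[0].Id); resp.Error != nil {
			fmt.Println("regen failed:", resp.Error)
		}
	}
}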
-func (c *Client4) GetAuthorizedOAuthAppsForUser(userId string, page, perPage int) ([]*OAuthApp, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) - r, err := c.DoApiGet(c.GetUserRoute(userId)+"/oauth/apps/authorized"+query, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return OAuthAppListFromJson(r.Body), BuildResponse(r) -} - -// AuthorizeOAuthApp will authorize an OAuth 2.0 client application to access a user's account and provide a redirect link to follow. -func (c *Client4) AuthorizeOAuthApp(authRequest *AuthorizeRequest) (string, *Response) { - r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/authorize", authRequest.ToJson(), "") - if err != nil { - return "", BuildErrorResponse(r, err) - } - defer closeBody(r) - return MapFromJson(r.Body)["redirect"], BuildResponse(r) -} - -// DeauthorizeOAuthApp will deauthorize an OAuth 2.0 client application from accessing a user's account. -func (c *Client4) DeauthorizeOAuthApp(appId string) (bool, *Response) { - requestData := map[string]string{"client_id": appId} - r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/deauthorize", MapToJson(requestData), "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// GetOAuthAccessToken is a test helper function for the OAuth access token endpoint. -func (c *Client4) GetOAuthAccessToken(data url.Values) (*AccessResponse, *Response) { - rq, err := http.NewRequest(http.MethodPost, c.Url+"/oauth/access_token", strings.NewReader(data.Encode())) - if err != nil { - return nil, &Response{Error: NewAppError(c.Url+"/oauth/access_token", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.Url+"/oauth/access_token", "model.client.connecting.app_error", nil, err.Error(), 403)} - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return AccessResponseFromJson(rp.Body), BuildResponse(rp) -} - -// Elasticsearch Section - -// TestElasticsearch will attempt to connect to the configured Elasticsearch server and return OK if configured. -// correctly. -func (c *Client4) TestElasticsearch() (bool, *Response) { - r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/test", "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// PurgeElasticsearchIndexes immediately deletes all Elasticsearch indexes. -func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) { - r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/purge_indexes", "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// Bleve Section - -// PurgeBleveIndexes immediately deletes all Bleve indexes. 
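// A sketch (not from the vendored source) of exchanging an authorization code
// for a token via the removed v5 GetOAuthAccessToken helper above. The client
// ID, secret, code, and redirect URI are placeholders, and the AccessToken
// field is assumed from the v5 AccessResponse model.
package main

import (
	"fmt"
	"net/url"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")

	data := url.Values{}
	data.Set("grant_type", "authorization_code") // standard OAuth 2.0 grant type
	data.Set("client_id", "client-id")
	data.Set("client_secret", "client-secret")
	data.Set("code", "auth-code")
	data.Set("redirect_uri", "https://example.com/callback")

	token, resp := client.GetOAuthAccessToken(data)
	if resp.Error != nil {
		fmt.Println("token exchange failed:", resp.Error)
		return
	}
	fmt.Println("access token:", token.AccessToken) // field assumed, see lead-in
}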
-func (c *Client4) PurgeBleveIndexes() (bool, *Response) {
-	r, err := c.DoApiPost(c.GetBleveRoute()+"/purge_indexes", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// Data Retention Section
-
-// GetDataRetentionPolicy will get the current server data retention policy details.
-func (c *Client4) GetDataRetentionPolicy() (*DataRetentionPolicy, *Response) {
-	r, err := c.DoApiGet(c.GetDataRetentionRoute()+"/policy", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return DataRetentionPolicyFromJson(r.Body), BuildResponse(r)
-}
-
-// Commands Section
-
-// CreateCommand will create a new command if the user has the right permissions.
-func (c *Client4) CreateCommand(cmd *Command) (*Command, *Response) {
-	r, err := c.DoApiPost(c.GetCommandsRoute(), cmd.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CommandFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateCommand updates a command based on the provided Command struct.
-func (c *Client4) UpdateCommand(cmd *Command) (*Command, *Response) {
-	r, err := c.DoApiPut(c.GetCommandRoute(cmd.Id), cmd.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CommandFromJson(r.Body), BuildResponse(r)
-}
-
-// MoveCommand moves a command to a different team.
-func (c *Client4) MoveCommand(teamId string, commandId string) (bool, *Response) {
-	cmr := CommandMoveRequest{TeamId: teamId}
-	r, err := c.DoApiPut(c.GetCommandMoveRoute(commandId), cmr.ToJson())
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DeleteCommand deletes a command based on the provided command id string.
-func (c *Client4) DeleteCommand(commandId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetCommandRoute(commandId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// ListCommands will retrieve a list of commands available in the team.
-func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Response) {
-	query := fmt.Sprintf("?team_id=%v&custom_only=%v", teamId, customOnly)
-	r, err := c.DoApiGet(c.GetCommandsRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CommandListFromJson(r.Body), BuildResponse(r)
-}
-
-// ListCommandAutocompleteSuggestions will retrieve a list of suggestions for a userInput.
-func (c *Client4) ListCommandAutocompleteSuggestions(userInput, teamId string) ([]AutocompleteSuggestion, *Response) {
-	query := fmt.Sprintf("/commands/autocomplete_suggestions?user_input=%v", userInput)
-	r, err := c.DoApiGet(c.GetTeamRoute(teamId)+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return AutocompleteSuggestionsFromJSON(r.Body), BuildResponse(r)
-}
-
-// GetCommandById will retrieve a command by id.
-func (c *Client4) GetCommandById(cmdId string) (*Command, *Response) {
-	url := fmt.Sprintf("%s/%s", c.GetCommandsRoute(), cmdId)
-	r, err := c.DoApiGet(url, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CommandFromJson(r.Body), BuildResponse(r)
-}
-
-// ExecuteCommand executes a given slash command.
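// A sketch (not from the vendored source) of listing a team's custom slash
// commands with the removed v5 ListCommands above; the team ID and token are
// placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "user-token" // hypothetical token
	client.AuthType = "BEARER"

	cmds, resp := client.ListCommands("team-id", true) // customOnly=true skips built-ins
	if resp.Error != nil {
		fmt.Println("list failed:", resp.Error)
		return
	}
	fmt.Printf("%d custom commands\n", len(cmds))
}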
-func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response) {
-	commandArgs := &CommandArgs{
-		ChannelId: channelId,
-		Command:   command,
-	}
-	r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	response, _ := CommandResponseFromJson(r.Body)
-	return response, BuildResponse(r)
-}
-
-// ExecuteCommandWithTeam executes a given slash command against the specified team.
-// Use this when executing slash commands in a DM/GM, since the team id cannot be inferred in that case.
-func (c *Client4) ExecuteCommandWithTeam(channelId, teamId, command string) (*CommandResponse, *Response) {
-	commandArgs := &CommandArgs{
-		ChannelId: channelId,
-		TeamId:    teamId,
-		Command:   command,
-	}
-	r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	response, _ := CommandResponseFromJson(r.Body)
-	return response, BuildResponse(r)
-}
-
-// ListAutocompleteCommands will retrieve a list of commands available in the team.
-func (c *Client4) ListAutocompleteCommands(teamId string) ([]*Command, *Response) {
-	r, err := c.DoApiGet(c.GetTeamAutoCompleteCommandsRoute(teamId), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CommandListFromJson(r.Body), BuildResponse(r)
-}
-
-// RegenCommandToken will create a new token if the user has the right permissions.
-func (c *Client4) RegenCommandToken(commandId string) (string, *Response) {
-	r, err := c.DoApiPut(c.GetCommandRoute(commandId)+"/regen_token", "")
-	if err != nil {
-		return "", BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapFromJson(r.Body)["token"], BuildResponse(r)
-}
-
-// Status Section
-
-// GetUserStatus returns a user's status based on the provided user id string.
-func (c *Client4) GetUserStatus(userId, etag string) (*Status, *Response) {
-	r, err := c.DoApiGet(c.GetUserStatusRoute(userId), etag)
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return StatusFromJson(r.Body), BuildResponse(r)
-}
-
-// GetUsersStatusesByIds returns a list of user statuses based on the provided user ids.
-func (c *Client4) GetUsersStatusesByIds(userIds []string) ([]*Status, *Response) {
-	r, err := c.DoApiPost(c.GetUserStatusesRoute()+"/ids", ArrayToJson(userIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return StatusListFromJson(r.Body), BuildResponse(r)
-}
-
-// UpdateUserStatus sets a user's status based on the provided user id string.
-func (c *Client4) UpdateUserStatus(userId string, userStatus *Status) (*Status, *Response) {
-	r, err := c.DoApiPut(c.GetUserStatusRoute(userId), userStatus.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return StatusFromJson(r.Body), BuildResponse(r)
-}
-
-// Emoji Section
-
-// CreateEmoji will save an emoji to the server if the current user has permission
-// to do so. If successful, the provided emoji will be returned with its Id field
-// filled in. Otherwise, an error will be returned.
-func (c *Client4) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *Response) {
-	body := &bytes.Buffer{}
-	writer := multipart.NewWriter(body)
-
-	part, err := writer.CreateFormFile("image", filename)
-	if err != nil {
-		return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)}
-	}
-
-	if _, err := io.Copy(part, bytes.NewBuffer(image)); err != nil {
-		return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)}
-	}
-
-	if err := writer.WriteField("emoji", emoji.ToJson()); err != nil {
-		return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0)}
-	}
-
-	if err := writer.Close(); err != nil {
-		return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0)}
-	}
-
-	return c.DoEmojiUploadFile(c.GetEmojisRoute(), body.Bytes(), writer.FormDataContentType())
-}
-
-// GetEmojiList returns a page of custom emoji on the system.
-func (c *Client4) GetEmojiList(page, perPage int) ([]*Emoji, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
-	r, err := c.DoApiGet(c.GetEmojisRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiListFromJson(r.Body), BuildResponse(r)
-}
-
-// GetSortedEmojiList returns a page of custom emoji on the system sorted based on the sort
-// parameter, blank for no sorting and "name" to sort by emoji names.
-func (c *Client4) GetSortedEmojiList(page, perPage int, sort string) ([]*Emoji, *Response) {
-	query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v", page, perPage, sort)
-	r, err := c.DoApiGet(c.GetEmojisRoute()+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiListFromJson(r.Body), BuildResponse(r)
-}
-
-// DeleteEmoji deletes a custom emoji based on the provided emoji id string.
-func (c *Client4) DeleteEmoji(emojiId string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetEmojiRoute(emojiId))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetEmoji returns a custom emoji based on the emojiId string.
-func (c *Client4) GetEmoji(emojiId string) (*Emoji, *Response) {
-	r, err := c.DoApiGet(c.GetEmojiRoute(emojiId), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiFromJson(r.Body), BuildResponse(r)
-}
-
-// GetEmojiByName returns a custom emoji based on the name string.
-func (c *Client4) GetEmojiByName(name string) (*Emoji, *Response) {
-	r, err := c.DoApiGet(c.GetEmojiByNameRoute(name), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiFromJson(r.Body), BuildResponse(r)
-}
-
-// GetEmojiImage returns the emoji image.
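// A sketch (not from the vendored source) of registering a custom emoji with
// the removed v5 CreateEmoji above, which posts the image bytes and the emoji
// metadata as one multipart form. The user ID, file path, and token are
// placeholders; the Emoji Name/CreatorId fields are assumed from the v5 model.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "user-token" // hypothetical token
	client.AuthType = "BEARER"

	img, err := ioutil.ReadFile("party.png") // assumed local image file
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	emoji := &model.Emoji{Name: "party", CreatorId: "user-id"} // fields assumed, see lead-in
	if created, resp := client.CreateEmoji(emoji, img, "party.png"); resp.Error == nil {
		fmt.Println("created emoji:", created.Id)
	}
}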
-func (c *Client4) GetEmojiImage(emojiId string) ([]byte, *Response) {
-	r, apErr := c.DoApiGet(c.GetEmojiRoute(emojiId)+"/image", "")
-	if apErr != nil {
-		return nil, BuildErrorResponse(r, apErr)
-	}
-	defer closeBody(r)
-
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, BuildErrorResponse(r, NewAppError("GetEmojiImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
-	}
-
-	return data, BuildResponse(r)
-}
-
-// SearchEmoji returns a list of emoji matching some search criteria.
-func (c *Client4) SearchEmoji(search *EmojiSearch) ([]*Emoji, *Response) {
-	r, err := c.DoApiPost(c.GetEmojisRoute()+"/search", search.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiListFromJson(r.Body), BuildResponse(r)
-}
-
-// AutocompleteEmoji returns a list of emoji starting with or matching name.
-func (c *Client4) AutocompleteEmoji(name string, etag string) ([]*Emoji, *Response) {
-	query := fmt.Sprintf("?name=%v", name)
-	r, err := c.DoApiGet(c.GetEmojisRoute()+"/autocomplete"+query, "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return EmojiListFromJson(r.Body), BuildResponse(r)
-}
-
-// Reaction Section
-
-// SaveReaction saves an emoji reaction for a post. Returns the saved reaction if successful, otherwise an error will be returned.
-func (c *Client4) SaveReaction(reaction *Reaction) (*Reaction, *Response) {
-	r, err := c.DoApiPost(c.GetReactionsRoute(), reaction.ToJson())
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ReactionFromJson(r.Body), BuildResponse(r)
-}
-
-// GetReactions returns a list of reactions to a post.
-func (c *Client4) GetReactions(postId string) ([]*Reaction, *Response) {
-	r, err := c.DoApiGet(c.GetPostRoute(postId)+"/reactions", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ReactionsFromJson(r.Body), BuildResponse(r)
-}
-
-// DeleteReaction deletes a user's reaction to a post.
-func (c *Client4) DeleteReaction(reaction *Reaction) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetUserRoute(reaction.UserId) + c.GetPostRoute(reaction.PostId) + fmt.Sprintf("/reactions/%v", reaction.EmojiName))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetBulkReactions returns a map of postIds and corresponding reactions
-func (c *Client4) GetBulkReactions(postIds []string) (map[string][]*Reaction, *Response) {
-	r, err := c.DoApiPost(c.GetPostsRoute()+"/ids/reactions", ArrayToJson(postIds))
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return MapPostIdToReactionsFromJson(r.Body), BuildResponse(r)
-}
-
-// Timezone Section
-
-// GetSupportedTimezone returns the list of supported timezones on the system.
-func (c *Client4) GetSupportedTimezone() ([]string, *Response) {
-	r, err := c.DoApiGet(c.GetTimezonesRoute(), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	var timezones []string
-	json.NewDecoder(r.Body).Decode(&timezones)
-	return timezones, BuildResponse(r)
-}
-
-// Open Graph Metadata Section
-
-// OpenGraph returns the Open Graph metadata for a particular URL if the site has the metadata.
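// A sketch (not from the vendored source) of adding and then removing a
// reaction with the removed v5 SaveReaction and DeleteReaction above; the
// user, post, and token values are placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "user-token" // hypothetical token
	client.AuthType = "BEARER"

	reaction := &model.Reaction{
		UserId:    "user-id",
		PostId:    "post-id",
		EmojiName: "+1",
	}
	if _, resp := client.SaveReaction(reaction); resp.Error != nil {
		fmt.Println("save failed:", resp.Error)
		return
	}
	// The same struct identifies the reaction to delete.
	if _, resp := client.DeleteReaction(reaction); resp.Error != nil {
		fmt.Println("delete failed:", resp.Error)
	}
}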
-func (c *Client4) OpenGraph(url string) (map[string]string, *Response) { - requestBody := make(map[string]string) - requestBody["url"] = url - - r, err := c.DoApiPost(c.GetOpenGraphRoute(), MapToJson(requestBody)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return MapFromJson(r.Body), BuildResponse(r) -} - -// Jobs Section - -// GetJob gets a single job. -func (c *Client4) GetJob(id string) (*Job, *Response) { - r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v", id), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return JobFromJson(r.Body), BuildResponse(r) -} - -// GetJobs gets all jobs, sorted with the job that was created most recently first. -func (c *Client4) GetJobs(page int, perPage int) ([]*Job, *Response) { - r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("?page=%v&per_page=%v", page, perPage), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return JobsFromJson(r.Body), BuildResponse(r) -} - -// GetJobsByType gets all jobs of a given type, sorted with the job that was created most recently first. -func (c *Client4) GetJobsByType(jobType string, page int, perPage int) ([]*Job, *Response) { - r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/type/%v?page=%v&per_page=%v", jobType, page, perPage), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return JobsFromJson(r.Body), BuildResponse(r) -} - -// CreateJob creates a job based on the provided job struct. -func (c *Client4) CreateJob(job *Job) (*Job, *Response) { - r, err := c.DoApiPost(c.GetJobsRoute(), job.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return JobFromJson(r.Body), BuildResponse(r) -} - -// CancelJob requests the cancellation of the job with the provided Id. -func (c *Client4) CancelJob(jobId string) (bool, *Response) { - r, err := c.DoApiPost(c.GetJobsRoute()+fmt.Sprintf("/%v/cancel", jobId), "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// Roles Section - -// GetRole gets a single role by ID. -func (c *Client4) GetRole(id string) (*Role, *Response) { - r, err := c.DoApiGet(c.GetRolesRoute()+fmt.Sprintf("/%v", id), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return RoleFromJson(r.Body), BuildResponse(r) -} - -// GetRoleByName gets a single role by Name. -func (c *Client4) GetRoleByName(name string) (*Role, *Response) { - r, err := c.DoApiGet(c.GetRolesRoute()+fmt.Sprintf("/name/%v", name), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return RoleFromJson(r.Body), BuildResponse(r) -} - -// GetRolesByNames returns a list of roles based on the provided role names. -func (c *Client4) GetRolesByNames(roleNames []string) ([]*Role, *Response) { - r, err := c.DoApiPost(c.GetRolesRoute()+"/names", ArrayToJson(roleNames)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return RoleListFromJson(r.Body), BuildResponse(r) -} - -// PatchRole partially updates a role in the system. Any missing fields are not updated. 
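// A sketch (not from the vendored source) of inspecting jobs of one type with
// the removed v5 GetJobsByType above and cancelling the newest one. The job
// type string and token are placeholders, and the Job Id field is assumed
// from the v5 model.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	jobs, resp := client.GetJobsByType("ldap_sync", 0, 10) // newest jobs first
	if resp.Error != nil {
		fmt.Println("list failed:", resp.Error)
		return
	}
	if len(jobs) > 0 {
		if _, resp := client.CancelJob(jobs[0].Id); resp.Error != nil { // Id field assumed
			fmt.Println("cancel failed:", resp.Error)
		}
	}
}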
-func (c *Client4) PatchRole(roleId string, patch *RolePatch) (*Role, *Response) { - r, err := c.DoApiPut(c.GetRolesRoute()+fmt.Sprintf("/%v/patch", roleId), patch.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return RoleFromJson(r.Body), BuildResponse(r) -} - -// Schemes Section - -// CreateScheme creates a new Scheme. -func (c *Client4) CreateScheme(scheme *Scheme) (*Scheme, *Response) { - r, err := c.DoApiPost(c.GetSchemesRoute(), scheme.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return SchemeFromJson(r.Body), BuildResponse(r) -} - -// GetScheme gets a single scheme by ID. -func (c *Client4) GetScheme(id string) (*Scheme, *Response) { - r, err := c.DoApiGet(c.GetSchemeRoute(id), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return SchemeFromJson(r.Body), BuildResponse(r) -} - -// GetSchemes gets all schemes, sorted with the most recently created first, optionally filtered by scope. -func (c *Client4) GetSchemes(scope string, page int, perPage int) ([]*Scheme, *Response) { - r, err := c.DoApiGet(c.GetSchemesRoute()+fmt.Sprintf("?scope=%v&page=%v&per_page=%v", scope, page, perPage), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return SchemesFromJson(r.Body), BuildResponse(r) -} - -// DeleteScheme deletes a single scheme by ID. -func (c *Client4) DeleteScheme(id string) (bool, *Response) { - r, err := c.DoApiDelete(c.GetSchemeRoute(id)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// PatchScheme partially updates a scheme in the system. Any missing fields are not updated. -func (c *Client4) PatchScheme(id string, patch *SchemePatch) (*Scheme, *Response) { - r, err := c.DoApiPut(c.GetSchemeRoute(id)+"/patch", patch.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return SchemeFromJson(r.Body), BuildResponse(r) -} - -// GetTeamsForScheme gets the teams using this scheme, sorted alphabetically by display name. -func (c *Client4) GetTeamsForScheme(schemeId string, page int, perPage int) ([]*Team, *Response) { - r, err := c.DoApiGet(c.GetSchemeRoute(schemeId)+fmt.Sprintf("/teams?page=%v&per_page=%v", page, perPage), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TeamListFromJson(r.Body), BuildResponse(r) -} - -// GetChannelsForScheme gets the channels using this scheme, sorted alphabetically by display name. -func (c *Client4) GetChannelsForScheme(schemeId string, page int, perPage int) (ChannelList, *Response) { - r, err := c.DoApiGet(c.GetSchemeRoute(schemeId)+fmt.Sprintf("/channels?page=%v&per_page=%v", page, perPage), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return *ChannelListFromJson(r.Body), BuildResponse(r) -} - -// Plugin Section - -// UploadPlugin takes an io.Reader stream pointing to the contents of a .tar.gz plugin. -// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
-func (c *Client4) UploadPlugin(file io.Reader) (*Manifest, *Response) { - return c.uploadPlugin(file, false) -} - -func (c *Client4) UploadPluginForced(file io.Reader) (*Manifest, *Response) { - return c.uploadPlugin(file, true) -} - -func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response) { - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - - if force { - err := writer.WriteField("force", c.boolString(true)) - if err != nil { - return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} - } - } - - part, err := writer.CreateFormFile("plugin", "plugin.tar.gz") - if err != nil { - return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} - } - - if _, err = io.Copy(part, file); err != nil { - return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} - } - - if err = writer.Close(); err != nil { - return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} - } - - rq, err := http.NewRequest("POST", c.ApiUrl+c.GetPluginsRoute(), body) - if err != nil { - return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} - } - rq.Header.Set("Content-Type", writer.FormDataContentType()) - - if len(c.AuthToken) > 0 { - rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) - } - - rp, err := c.HttpClient.Do(rq) - if err != nil || rp == nil { - return nil, BuildErrorResponse(rp, NewAppError("UploadPlugin", "model.client.connecting.app_error", nil, err.Error(), 0)) - } - defer closeBody(rp) - - if rp.StatusCode >= 300 { - return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) - } - - return ManifestFromJson(rp.Body), BuildResponse(rp) -} - -func (c *Client4) InstallPluginFromUrl(downloadUrl string, force bool) (*Manifest, *Response) { - forceStr := c.boolString(force) - - url := fmt.Sprintf("%s?plugin_download_url=%s&force=%s", c.GetPluginsRoute()+"/install_from_url", url.QueryEscape(downloadUrl), forceStr) - r, err := c.DoApiPost(url, "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ManifestFromJson(r.Body), BuildResponse(r) -} - -// InstallMarketplacePlugin will install marketplace plugin. -// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. -func (c *Client4) InstallMarketplacePlugin(request *InstallMarketplacePluginRequest) (*Manifest, *Response) { - json, err := request.ToJson() - if err != nil { - return nil, &Response{Error: NewAppError("InstallMarketplacePlugin", "model.client.plugin_request_to_json.app_error", nil, err.Error(), http.StatusBadRequest)} - } - r, appErr := c.DoApiPost(c.GetPluginsRoute()+"/marketplace", json) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return ManifestFromJson(r.Body), BuildResponse(r) -} - -// GetPlugins will return a list of plugin manifests for currently active plugins. -// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
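// A sketch (not from the vendored source) of installing a plugin from a URL
// with the removed v5 InstallPluginFromUrl above; the download URL and token
// are placeholders, force=false refuses to overwrite an existing install,
// and the Manifest Id field is assumed from the v5 model.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	manifest, resp := client.InstallPluginFromUrl("https://example.com/demo-plugin.tar.gz", false)
	if resp.Error != nil {
		fmt.Println("install failed:", resp.Error)
		return
	}
	fmt.Println("installed plugin:", manifest.Id) // Id field assumed, see lead-in
}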
-func (c *Client4) GetPlugins() (*PluginsResponse, *Response) {
-	r, err := c.DoApiGet(c.GetPluginsRoute(), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PluginsResponseFromJson(r.Body), BuildResponse(r)
-}
-
-// GetPluginStatuses will return the plugins installed on any server in the cluster, for reporting
-// to the administrator via the system console.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) GetPluginStatuses() (PluginStatuses, *Response) {
-	r, err := c.DoApiGet(c.GetPluginsRoute()+"/statuses", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return PluginStatusesFromJson(r.Body), BuildResponse(r)
-}
-
-// RemovePlugin will disable and delete a plugin.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) RemovePlugin(id string) (bool, *Response) {
-	r, err := c.DoApiDelete(c.GetPluginRoute(id))
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetWebappPlugins will return a list of plugins that the webapp should download.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) GetWebappPlugins() ([]*Manifest, *Response) {
-	r, err := c.DoApiGet(c.GetPluginsRoute()+"/webapp", "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return ManifestListFromJson(r.Body), BuildResponse(r)
-}
-
-// EnablePlugin will enable an installed plugin.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) EnablePlugin(id string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetPluginRoute(id)+"/enable", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// DisablePlugin will disable an enabled plugin.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) DisablePlugin(id string) (bool, *Response) {
-	r, err := c.DoApiPost(c.GetPluginRoute(id)+"/disable", "")
-	if err != nil {
-		return false, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-	return CheckStatusOK(r), BuildResponse(r)
-}
-
-// GetMarketplacePlugins will return a list of plugins that an admin can install.
-// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
-func (c *Client4) GetMarketplacePlugins(filter *MarketplacePluginFilter) ([]*MarketplacePlugin, *Response) {
-	route := c.GetPluginsRoute() + "/marketplace"
-	u, parseErr := url.Parse(route)
-	if parseErr != nil {
-		return nil, &Response{Error: NewAppError("GetMarketplacePlugins", "model.client.parse_plugins.app_error", nil, parseErr.Error(), http.StatusBadRequest)}
-	}
-
-	filter.ApplyToURL(u)
-
-	r, err := c.DoApiGet(u.String(), "")
-	if err != nil {
-		return nil, BuildErrorResponse(r, err)
-	}
-	defer closeBody(r)
-
-	plugins, readerErr := MarketplacePluginsFromReader(r.Body)
-	if readerErr != nil {
-		return nil, BuildErrorResponse(r, NewAppError(route, "model.client.parse_plugins.app_error", nil, readerErr.Error(), http.StatusBadRequest))
-	}
-
-	return plugins, BuildResponse(r)
-}
-
-// UpdateChannelScheme will update a channel's scheme.
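// A sketch (not from the vendored source) of toggling a plugin with the
// removed v5 EnablePlugin and DisablePlugin above; the plugin ID and token
// are placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	const pluginID = "com.example.demo" // hypothetical plugin ID
	if ok, resp := client.EnablePlugin(pluginID); resp.Error != nil || !ok {
		fmt.Println("enable failed:", resp.Error)
		return
	}
	if ok, resp := client.DisablePlugin(pluginID); resp.Error != nil || !ok {
		fmt.Println("disable failed:", resp.Error)
	}
}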
-func (c *Client4) UpdateChannelScheme(channelId, schemeId string) (bool, *Response) { - sip := &SchemeIDPatch{SchemeID: &schemeId} - r, err := c.DoApiPut(c.GetChannelSchemeRoute(channelId), sip.ToJson()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// UpdateTeamScheme will update a team's scheme. -func (c *Client4) UpdateTeamScheme(teamId, schemeId string) (bool, *Response) { - sip := &SchemeIDPatch{SchemeID: &schemeId} - r, err := c.DoApiPut(c.GetTeamSchemeRoute(teamId), sip.ToJson()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// GetRedirectLocation retrieves the value of the 'Location' header of an HTTP response for a given URL. -func (c *Client4) GetRedirectLocation(urlParam, etag string) (string, *Response) { - url := fmt.Sprintf("%s?url=%s", c.GetRedirectLocationRoute(), url.QueryEscape(urlParam)) - r, err := c.DoApiGet(url, etag) - if err != nil { - return "", BuildErrorResponse(r, err) - } - defer closeBody(r) - return MapFromJson(r.Body)["location"], BuildResponse(r) -} - -// SetServerBusy will mark the server as busy, which disables non-critical services for `secs` seconds. -func (c *Client4) SetServerBusy(secs int) (bool, *Response) { - url := fmt.Sprintf("%s?seconds=%d", c.GetServerBusyRoute(), secs) - r, err := c.DoApiPost(url, "") - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// ClearServerBusy will mark the server as not busy. -func (c *Client4) ClearServerBusy() (bool, *Response) { - r, err := c.DoApiDelete(c.GetServerBusyRoute()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// GetServerBusy returns the current ServerBusyState including the time when a server marked busy -// will automatically have the flag cleared. -func (c *Client4) GetServerBusy() (*ServerBusyState, *Response) { - r, err := c.DoApiGet(c.GetServerBusyRoute(), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - - sbs := ServerBusyStateFromJson(r.Body) - return sbs, BuildResponse(r) -} - -// GetServerBusyExpires returns the time when a server marked busy -// will automatically have the flag cleared. -// -// Deprecated: Use GetServerBusy instead. -func (c *Client4) GetServerBusyExpires() (*time.Time, *Response) { - r, err := c.DoApiGet(c.GetServerBusyRoute(), "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - - sbs := ServerBusyStateFromJson(r.Body) - expires := time.Unix(sbs.Expires, 0) - return &expires, BuildResponse(r) -} - -// RegisterTermsOfServiceAction saves action performed by a user against a specific terms of service. 
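// A sketch (not from the vendored source) of the server-busy flow removed
// above: mark the server busy for five minutes, read the state back, then
// clear it. The server URL and token are placeholders.
package main

import (
	"fmt"
	"time"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	if ok, resp := client.SetServerBusy(300); resp.Error != nil || !ok {
		fmt.Println("set busy failed:", resp.Error)
		return
	}
	if sbs, resp := client.GetServerBusy(); resp.Error == nil {
		fmt.Println("busy until:", time.Unix(sbs.Expires, 0)) // Expires is a Unix timestamp
	}
	if _, resp := client.ClearServerBusy(); resp.Error != nil {
		fmt.Println("clear failed:", resp.Error)
	}
}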
-func (c *Client4) RegisterTermsOfServiceAction(userId, termsOfServiceId string, accepted bool) (*bool, *Response) { - url := c.GetUserTermsOfServiceRoute(userId) - data := map[string]interface{}{"termsOfServiceId": termsOfServiceId, "accepted": accepted} - r, err := c.DoApiPost(url, StringInterfaceToJson(data)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return NewBool(CheckStatusOK(r)), BuildResponse(r) -} - -// GetTermsOfService fetches the latest terms of service -func (c *Client4) GetTermsOfService(etag string) (*TermsOfService, *Response) { - url := c.GetTermsOfServiceRoute() - r, err := c.DoApiGet(url, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TermsOfServiceFromJson(r.Body), BuildResponse(r) -} - -// GetUserTermsOfService fetches user's latest terms of service action if the latest action was for acceptance. -func (c *Client4) GetUserTermsOfService(userId, etag string) (*UserTermsOfService, *Response) { - url := c.GetUserTermsOfServiceRoute(userId) - r, err := c.DoApiGet(url, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return UserTermsOfServiceFromJson(r.Body), BuildResponse(r) -} - -// CreateTermsOfService creates new terms of service. -func (c *Client4) CreateTermsOfService(text, userId string) (*TermsOfService, *Response) { - url := c.GetTermsOfServiceRoute() - data := map[string]interface{}{"text": text} - r, err := c.DoApiPost(url, StringInterfaceToJson(data)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return TermsOfServiceFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) GetGroup(groupID, etag string) (*Group, *Response) { - r, appErr := c.DoApiGet(c.GetGroupRoute(groupID), etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) PatchGroup(groupID string, patch *GroupPatch) (*Group, *Response) { - payload, _ := json.Marshal(patch) - r, appErr := c.DoApiPut(c.GetGroupRoute(groupID)+"/patch", string(payload)) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) LinkGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, patch *GroupSyncablePatch) (*GroupSyncable, *Response) { - payload, _ := json.Marshal(patch) - url := fmt.Sprintf("%s/link", c.GetGroupSyncableRoute(groupID, syncableID, syncableType)) - r, appErr := c.DoApiPost(url, string(payload)) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupSyncableFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) UnlinkGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType) *Response { - url := fmt.Sprintf("%s/link", c.GetGroupSyncableRoute(groupID, syncableID, syncableType)) - r, appErr := c.DoApiDelete(url) - if appErr != nil { - return BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return BuildResponse(r) -} - -func (c *Client4) GetGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, etag string) (*GroupSyncable, *Response) { - r, appErr := c.DoApiGet(c.GetGroupSyncableRoute(groupID, syncableID, syncableType), etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupSyncableFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) 
GetGroupSyncables(groupID string, syncableType GroupSyncableType, etag string) ([]*GroupSyncable, *Response) { - r, appErr := c.DoApiGet(c.GetGroupSyncablesRoute(groupID, syncableType), etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupSyncablesFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) PatchGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, patch *GroupSyncablePatch) (*GroupSyncable, *Response) { - payload, _ := json.Marshal(patch) - r, appErr := c.DoApiPut(c.GetGroupSyncableRoute(groupID, syncableID, syncableType)+"/patch", string(payload)) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupSyncableFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) TeamMembersMinusGroupMembers(teamID string, groupIDs []string, page, perPage int, etag string) ([]*UserWithGroups, int64, *Response) { - groupIDStr := strings.Join(groupIDs, ",") - query := fmt.Sprintf("?group_ids=%s&page=%d&per_page=%d", groupIDStr, page, perPage) - r, err := c.DoApiGet(c.GetTeamRoute(teamID)+"/members_minus_group_members"+query, etag) - if err != nil { - return nil, 0, BuildErrorResponse(r, err) - } - defer closeBody(r) - ugc := UsersWithGroupsAndCountFromJson(r.Body) - return ugc.Users, ugc.Count, BuildResponse(r) -} - -func (c *Client4) ChannelMembersMinusGroupMembers(channelID string, groupIDs []string, page, perPage int, etag string) ([]*UserWithGroups, int64, *Response) { - groupIDStr := strings.Join(groupIDs, ",") - query := fmt.Sprintf("?group_ids=%s&page=%d&per_page=%d", groupIDStr, page, perPage) - r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/members_minus_group_members"+query, etag) - if err != nil { - return nil, 0, BuildErrorResponse(r, err) - } - defer closeBody(r) - ugc := UsersWithGroupsAndCountFromJson(r.Body) - return ugc.Users, ugc.Count, BuildResponse(r) -} - -func (c *Client4) PatchConfig(config *Config) (*Config, *Response) { - r, err := c.DoApiPut(c.GetConfigRoute()+"/patch", config.ToJson()) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ConfigFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) GetChannelModerations(channelID string, etag string) ([]*ChannelModeration, *Response) { - r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/moderations", etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ChannelModerationsFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) PatchChannelModerations(channelID string, patch []*ChannelModerationPatch) ([]*ChannelModeration, *Response) { - payload, _ := json.Marshal(patch) - r, err := c.DoApiPut(c.GetChannelRoute(channelID)+"/moderations/patch", string(payload)) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ChannelModerationsFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) GetKnownUsers() ([]string, *Response) { - r, err := c.DoApiGet(c.GetUsersRoute()+"/known", "") - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - var userIds []string - json.NewDecoder(r.Body).Decode(&userIds) - return userIds, BuildResponse(r) -} - -// PublishUserTyping publishes a user is typing websocket event based on the provided TypingRequest. 
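// A sketch (not from the vendored source) of two of the removed v5 read
// endpoints above: channel moderation rules and the set of user IDs visible
// to the caller. The channel ID and token are placeholders.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthToken = "admin-token" // hypothetical token
	client.AuthType = "BEARER"

	mods, resp := client.GetChannelModerations("channel-id", "") // empty etag
	if resp.Error != nil {
		fmt.Println("moderations failed:", resp.Error)
		return
	}
	fmt.Printf("%d moderation entries\n", len(mods))

	if ids, resp := client.GetKnownUsers(); resp.Error == nil {
		fmt.Printf("%d known users\n", len(ids))
	}
}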
-func (c *Client4) PublishUserTyping(userID string, typingRequest TypingRequest) (bool, *Response) { - r, err := c.DoApiPost(c.GetPublishUserTypingRoute(userID), typingRequest.ToJson()) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -func (c *Client4) GetChannelMemberCountsByGroup(channelID string, includeTimezones bool, etag string) ([]*ChannelMemberCountByGroup, *Response) { - r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/member_counts_by_group?include_timezones="+strconv.FormatBool(includeTimezones), etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ChannelMemberCountsByGroupFromJson(r.Body), BuildResponse(r) -} - -// RequestTrialLicense will request a trial license and install it in the server -func (c *Client4) RequestTrialLicense(users int) (bool, *Response) { - b, _ := json.Marshal(map[string]int{"users": users}) - r, err := c.DoApiPost("/trial-license", string(b)) - if err != nil { - return false, BuildErrorResponse(r, err) - } - defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) -} - -// GetGroupStats retrieves stats for a Mattermost Group -func (c *Client4) GetGroupStats(groupID string) (*GroupStats, *Response) { - r, appErr := c.DoApiGet(c.GetGroupRoute(groupID)+"/stats", "") - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - return GroupStatsFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) GetSidebarCategoriesForTeamForUser(userID, teamID, etag string) (*OrderedSidebarCategories, *Response) { - route := c.GetUserCategoryRoute(userID, teamID) - r, appErr := c.DoApiGet(route, etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - cat, err := OrderedSidebarCategoriesFromJson(r.Body) - if err != nil { - return nil, BuildErrorResponse(r, NewAppError("Client4.GetSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) - } - return cat, BuildResponse(r) -} - -func (c *Client4) CreateSidebarCategoryForTeamForUser(userID, teamID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { - payload, _ := json.Marshal(category) - route := c.GetUserCategoryRoute(userID, teamID) - r, appErr := c.doApiPostBytes(route, payload) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - cat, err := SidebarCategoryFromJson(r.Body) - if err != nil { - return nil, BuildErrorResponse(r, NewAppError("Client4.CreateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) - } - return cat, BuildResponse(r) -} - -func (c *Client4) GetSidebarCategoryOrderForTeamForUser(userID, teamID, etag string) ([]string, *Response) { - route := c.GetUserCategoryRoute(userID, teamID) + "/order" - r, err := c.DoApiGet(route, etag) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ArrayFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) UpdateSidebarCategoryOrderForTeamForUser(userID, teamID string, order []string) ([]string, *Response) { - payload, _ := json.Marshal(order) - route := c.GetUserCategoryRoute(userID, teamID) + "/order" - r, err := c.doApiPutBytes(route, payload) - if err != nil { - return nil, BuildErrorResponse(r, err) - } - defer closeBody(r) - return ArrayFromJson(r.Body), BuildResponse(r) -} - -func (c *Client4) GetSidebarCategoryForTeamForUser(userID, teamID, categoryID, 
etag string) (*SidebarCategoryWithChannels, *Response) { - route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID - r, appErr := c.DoApiGet(route, etag) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - cat, err := SidebarCategoryFromJson(r.Body) - if err != nil { - return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError(c.GetUserRoute(userID), "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} - } - - return cat, BuildResponse(r) -} - -func (c *Client4) UpdateSidebarCategoryForTeamForUser(userID, teamID, categoryID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { - payload, _ := json.Marshal(category) - route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID - r, appErr := c.doApiPutBytes(route, payload) - if appErr != nil { - return nil, BuildErrorResponse(r, appErr) - } - defer closeBody(r) - cat, err := SidebarCategoryFromJson(r.Body) - if err != nil { - return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError(c.GetUserRoute(userID), "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} - } - - return cat, BuildResponse(r) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go deleted file mode 100644 index 82437469..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type ClusterInfo struct { - Id string `json:"id"` - Version string `json:"version"` - ConfigHash string `json:"config_hash"` - IpAddress string `json:"ipaddress"` - Hostname string `json:"hostname"` -} - -func (me *ClusterInfo) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func ClusterInfoFromJson(data io.Reader) *ClusterInfo { - var me *ClusterInfo - json.NewDecoder(data).Decode(&me) - return me -} - -func ClusterInfosToJson(objmap []*ClusterInfo) string { - b, _ := json.Marshal(objmap) - return string(b) -} - -func ClusterInfosFromJson(data io.Reader) []*ClusterInfo { - decoder := json.NewDecoder(data) - - var objmap []*ClusterInfo - if err := decoder.Decode(&objmap); err != nil { - return make([]*ClusterInfo, 0) - } else { - return objmap - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go deleted file mode 100644 index 529f4a93..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" -) - -const ( - CLUSTER_EVENT_PUBLISH = "publish" - CLUSTER_EVENT_UPDATE_STATUS = "update_status" - CLUSTER_EVENT_INVALIDATE_ALL_CACHES = "inv_all_caches" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS = "inv_reactions" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOK = "inv_webhook" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_POSTS = "inv_channel_posts" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS_NOTIFY_PROPS = "inv_channel_members_notify_props" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS = "inv_channel_members" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_BY_NAME = "inv_channel_name" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL = "inv_channel" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_GUEST_COUNT = "inv_channel_guest_count" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER = "inv_user" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER_TEAMS = "inv_user_teams" - CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_USER = "clear_session_user" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES = "inv_roles" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLE_PERMISSIONS = "inv_role_permissions" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_PROFILE_BY_IDS = "inv_profile_ids" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_PROFILE_IN_CHANNEL = "inv_profile_in_channel" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_SCHEMES = "inv_schemes" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_FILE_INFOS = "inv_file_infos" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOKS = "inv_webhooks" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_EMOJIS_BY_ID = "inv_emojis_by_id" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_EMOJIS_ID_BY_NAME = "inv_emojis_id_by_name" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_PINNEDPOSTS_COUNTS = "inv_channel_pinnedposts_counts" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBER_COUNTS = "inv_channel_member_counts" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_LAST_POSTS = "inv_last_posts" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_LAST_POST_TIME = "inv_last_post_time" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TEAMS = "inv_teams" - CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_ALL_USERS = "inv_all_user_sessions" - CLUSTER_EVENT_INSTALL_PLUGIN = "install_plugin" - CLUSTER_EVENT_REMOVE_PLUGIN = "remove_plugin" - CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TERMS_OF_SERVICE = "inv_terms_of_service" - CLUSTER_EVENT_BUSY_STATE_CHANGED = "busy_state_change" - - // Gossip communication - CLUSTER_GOSSIP_EVENT_REQUEST_GET_LOGS = "gossip_request_get_logs" - CLUSTER_GOSSIP_EVENT_RESPONSE_GET_LOGS = "gossip_response_get_logs" - CLUSTER_GOSSIP_EVENT_REQUEST_GET_CLUSTER_STATS = "gossip_request_cluster_stats" - CLUSTER_GOSSIP_EVENT_RESPONSE_GET_CLUSTER_STATS = "gossip_response_cluster_stats" - CLUSTER_GOSSIP_EVENT_REQUEST_GET_PLUGIN_STATUSES = "gossip_request_plugin_statuses" - CLUSTER_GOSSIP_EVENT_RESPONSE_GET_PLUGIN_STATUSES = "gossip_response_plugin_statuses" - CLUSTER_GOSSIP_EVENT_REQUEST_SAVE_CONFIG = "gossip_request_save_config" - CLUSTER_GOSSIP_EVENT_RESPONSE_SAVE_CONFIG = "gossip_response_save_config" - - // SendTypes for ClusterMessage. 
- CLUSTER_SEND_BEST_EFFORT = "best_effort" - CLUSTER_SEND_RELIABLE = "reliable" -) - -type ClusterMessage struct { - Event string `json:"event"` - SendType string `json:"-"` - WaitForAllToSend bool `json:"-"` - Data string `json:"data,omitempty"` - Props map[string]string `json:"props,omitempty"` -} - -func (o *ClusterMessage) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ClusterMessageFromJson(data io.Reader) *ClusterMessage { - var o *ClusterMessage - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go deleted file mode 100644 index 15a6372a..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" - - goi18n "github.com/mattermost/go-i18n/i18n" -) - -type CommandArgs struct { - UserId string `json:"user_id"` - ChannelId string `json:"channel_id"` - TeamId string `json:"team_id"` - RootId string `json:"root_id"` - ParentId string `json:"parent_id"` - TriggerId string `json:"trigger_id,omitempty"` - Command string `json:"command"` - SiteURL string `json:"-"` - T goi18n.TranslateFunc `json:"-"` - UserMentions UserMentionMap `json:"-"` - ChannelMentions ChannelMentionMap `json:"-"` - - // DO NOT USE Session field is deprecated. MM-26398 - Session Session `json:"-"` -} - -func (o *CommandArgs) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func CommandArgsFromJson(data io.Reader) *CommandArgs { - var o *CommandArgs - json.NewDecoder(data).Decode(&o) - return o -} - -// AddUserMention adds or overrides an entry in UserMentions with name username -// and identifier userId -func (o *CommandArgs) AddUserMention(username, userId string) { - if o.UserMentions == nil { - o.UserMentions = make(UserMentionMap) - } - - o.UserMentions[username] = userId -} - -// AddChannelMention adds or overrides an entry in ChannelMentions with name -// channelName and identifier channelId -func (o *CommandArgs) AddChannelMention(channelName, channelId string) { - if o.ChannelMentions == nil { - o.ChannelMentions = make(ChannelMentionMap) - } - - o.ChannelMentions[channelName] = channelId -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go deleted file mode 100644 index 9a4e40c8..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
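The CLUSTER_EVENT_* block removed above marks the other broad surface change in the v5-to-v6 bump: the model package's SCREAMING_SNAKE constants become Go-style identifiers, and cluster events gain a dedicated string type. The sketch below mirrors that convention; the exact v6 names (ClusterEventPublish and friends) are written from memory and should be read as assumptions, not quotations from the vendored v6 source.

package main

import "fmt"

// Typed replacement for bare event strings, in the v6 naming style.
type ClusterEvent string

const (
	ClusterEventPublish             ClusterEvent = "publish"
	ClusterEventUpdateStatus        ClusterEvent = "update_status"
	ClusterEventInvalidateAllCaches ClusterEvent = "inv_all_caches"
)

func handle(ev ClusterEvent) {
	switch ev {
	case ClusterEventPublish:
		fmt.Println("fan out published message")
	default:
		fmt.Println("unhandled event:", ev)
	}
}

func main() {
	handle(ClusterEventPublish)
}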
- -package model - -import ( - "encoding/json" - "io" -) - -type CommandMoveRequest struct { - TeamId string `json:"team_id"` -} - -func CommandMoveRequestFromJson(data io.Reader) (*CommandMoveRequest, error) { - decoder := json.NewDecoder(data) - var cmr CommandMoveRequest - err := decoder.Decode(&cmr) - if err != nil { - return nil, err - } - return &cmr, nil -} - -func (cmr *CommandMoveRequest) ToJson() string { - b, err := json.Marshal(cmr) - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go deleted file mode 100644 index 462ec219..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go +++ /dev/null @@ -1,3495 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "crypto/tls" - "encoding/json" - "io" - "math" - "net" - "net/http" - "net/url" - "os" - "regexp" - "strconv" - "strings" - "time" - - "github.com/mattermost/ldap" -) - -const ( - CONN_SECURITY_NONE = "" - CONN_SECURITY_PLAIN = "PLAIN" - CONN_SECURITY_TLS = "TLS" - CONN_SECURITY_STARTTLS = "STARTTLS" - - IMAGE_DRIVER_LOCAL = "local" - IMAGE_DRIVER_S3 = "amazons3" - - DATABASE_DRIVER_SQLITE = "sqlite3" - DATABASE_DRIVER_MYSQL = "mysql" - DATABASE_DRIVER_POSTGRES = "postgres" - - MINIO_ACCESS_KEY = "minioaccesskey" - MINIO_SECRET_KEY = "miniosecretkey" - MINIO_BUCKET = "mattermost-test" - - PASSWORD_MAXIMUM_LENGTH = 64 - PASSWORD_MINIMUM_LENGTH = 5 - - SERVICE_GITLAB = "gitlab" - SERVICE_GOOGLE = "google" - SERVICE_OFFICE365 = "office365" - - GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel" - GENERIC_NOTIFICATION = "generic" - GENERIC_NOTIFICATION_SERVER = "https://push-test.mattermost.com" - MM_SUPPORT_ADDRESS = "support@mattermost.com" - FULL_NOTIFICATION = "full" - ID_LOADED_NOTIFICATION = "id_loaded" - - DIRECT_MESSAGE_ANY = "any" - DIRECT_MESSAGE_TEAM = "team" - - SHOW_USERNAME = "username" - SHOW_NICKNAME_FULLNAME = "nickname_full_name" - SHOW_FULLNAME = "full_name" - - PERMISSIONS_ALL = "all" - PERMISSIONS_CHANNEL_ADMIN = "channel_admin" - PERMISSIONS_TEAM_ADMIN = "team_admin" - PERMISSIONS_SYSTEM_ADMIN = "system_admin" - - FAKE_SETTING = "********************************" - - RESTRICT_EMOJI_CREATION_ALL = "all" - RESTRICT_EMOJI_CREATION_ADMIN = "admin" - RESTRICT_EMOJI_CREATION_SYSTEM_ADMIN = "system_admin" - - PERMISSIONS_DELETE_POST_ALL = "all" - PERMISSIONS_DELETE_POST_TEAM_ADMIN = "team_admin" - PERMISSIONS_DELETE_POST_SYSTEM_ADMIN = "system_admin" - - ALLOW_EDIT_POST_ALWAYS = "always" - ALLOW_EDIT_POST_NEVER = "never" - ALLOW_EDIT_POST_TIME_LIMIT = "time_limit" - - GROUP_UNREAD_CHANNELS_DISABLED = "disabled" - GROUP_UNREAD_CHANNELS_DEFAULT_ON = "default_on" - GROUP_UNREAD_CHANNELS_DEFAULT_OFF = "default_off" - - EMAIL_BATCHING_BUFFER_SIZE = 256 - EMAIL_BATCHING_INTERVAL = 30 - - EMAIL_NOTIFICATION_CONTENTS_FULL = "full" - EMAIL_NOTIFICATION_CONTENTS_GENERIC = "generic" - - SITENAME_MAX_LENGTH = 30 - - SERVICE_SETTINGS_DEFAULT_SITE_URL = "http://localhost:8065" - SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE = "" - SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = "" - SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300 - SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300 - SERVICE_SETTINGS_DEFAULT_IDLE_TIMEOUT = 60 - SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS = 10 - SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = "" - SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS = ":8065" - 
SERVICE_SETTINGS_DEFAULT_GFYCAT_API_KEY = "2_KtH_W5" - SERVICE_SETTINGS_DEFAULT_GFYCAT_API_SECRET = "3wLVZPiswc3DnaiaFoLkDvB4X0IV6CpMkj4tf2inJRsBY6-FnkT08zGmppWFgeof" - - TEAM_SETTINGS_DEFAULT_SITE_NAME = "Mattermost" - TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM = 50 - TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT = "" - TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT = "" - TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT = 300 - - SQL_SETTINGS_DEFAULT_DATA_SOURCE = "mmuser:mostest@tcp(localhost:3306)/mattermost_test?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s" - - FILE_SETTINGS_DEFAULT_DIRECTORY = "./data/" - - EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION = "" - - SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK = "https://about.mattermost.com/default-terms/" - SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK = "https://about.mattermost.com/default-privacy-policy/" - SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK = "https://about.mattermost.com/default-about/" - SUPPORT_SETTINGS_DEFAULT_HELP_LINK = "https://about.mattermost.com/default-help/" - SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK = "https://about.mattermost.com/default-report-a-problem/" - SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL = "feedback@mattermost.com" - SUPPORT_SETTINGS_DEFAULT_RE_ACCEPTANCE_PERIOD = 365 - - LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME = "" - LDAP_SETTINGS_DEFAULT_GROUP_DISPLAY_NAME_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_GROUP_ID_ATTRIBUTE = "" - LDAP_SETTINGS_DEFAULT_PICTURE_ATTRIBUTE = "" - - SAML_SETTINGS_DEFAULT_ID_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_GUEST_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_ADMIN_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE = "" - SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = "" - - SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 = "RSAwithSHA1" - SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA256 = "RSAwithSHA256" - SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA512 = "RSAwithSHA512" - SAML_SETTINGS_DEFAULT_SIGNATURE_ALGORITHM = SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 - - SAML_SETTINGS_CANONICAL_ALGORITHM_C14N = "Canonical1.0" - SAML_SETTINGS_CANONICAL_ALGORITHM_C14N11 = "Canonical1.1" - SAML_SETTINGS_DEFAULT_CANONICAL_ALGORITHM = SAML_SETTINGS_CANONICAL_ALGORITHM_C14N - - NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK = "https://mattermost.com/download/#mattermostApps" - NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-android-app/" - NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-ios-app/" - - EXPERIMENTAL_SETTINGS_DEFAULT_LINK_METADATA_TIMEOUT_MILLISECONDS = 5000 - - ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500 - - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" - - TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default" - - ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "http://localhost:9200" - ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = "elastic" - ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = "changeme" - 
ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_CHANNEL_INDEX_REPLICAS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_CHANNEL_INDEX_SHARDS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_USER_INDEX_REPLICAS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_USER_INDEX_SHARDS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS = 365 - ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME = "03:00" - ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX = "" - ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 - ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS = 30 - - BLEVE_SETTINGS_DEFAULT_INDEX_DIR = "" - BLEVE_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 - - DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS = 365 - DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365 - DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00" - - PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" - PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" - PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE = true - PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL = "https://api.integrations.mattermost.com" - PLUGIN_SETTINGS_OLD_MARKETPLACE_URL = "https://marketplace.integrations.mattermost.com" - - COMPLIANCE_EXPORT_TYPE_CSV = "csv" - COMPLIANCE_EXPORT_TYPE_ACTIANCE = "actiance" - COMPLIANCE_EXPORT_TYPE_GLOBALRELAY = "globalrelay" - COMPLIANCE_EXPORT_TYPE_GLOBALRELAY_ZIP = "globalrelay-zip" - GLOBALRELAY_CUSTOMER_TYPE_A9 = "A9" - GLOBALRELAY_CUSTOMER_TYPE_A10 = "A10" - - CLIENT_SIDE_CERT_CHECK_PRIMARY_AUTH = "primary" - CLIENT_SIDE_CERT_CHECK_SECONDARY_AUTH = "secondary" - - IMAGE_PROXY_TYPE_LOCAL = "local" - IMAGE_PROXY_TYPE_ATMOS_CAMO = "atmos/camo" - - GOOGLE_SETTINGS_DEFAULT_SCOPE = "profile email" - GOOGLE_SETTINGS_DEFAULT_AUTH_ENDPOINT = "https://accounts.google.com/o/oauth2/v2/auth" - GOOGLE_SETTINGS_DEFAULT_TOKEN_ENDPOINT = "https://www.googleapis.com/oauth2/v4/token" - GOOGLE_SETTINGS_DEFAULT_USER_API_ENDPOINT = "https://people.googleapis.com/v1/people/me?personFields=names,emailAddresses,nicknames,metadata" - - OFFICE365_SETTINGS_DEFAULT_SCOPE = "User.Read" - OFFICE365_SETTINGS_DEFAULT_AUTH_ENDPOINT = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize" - OFFICE365_SETTINGS_DEFAULT_TOKEN_ENDPOINT = "https://login.microsoftonline.com/common/oauth2/v2.0/token" - OFFICE365_SETTINGS_DEFAULT_USER_API_ENDPOINT = "https://graph.microsoft.com/v1.0/me" - - LOCAL_MODE_SOCKET_PATH = "/var/tmp/mattermost_local.socket" -) - -var ServerTLSSupportedCiphers = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": 
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, -} - -type ServiceSettings struct { - SiteURL *string `restricted:"true"` - WebsocketURL *string `restricted:"true"` - LicenseFileLocation *string `restricted:"true"` - ListenAddress *string `restricted:"true"` - ConnectionSecurity *string `restricted:"true"` - TLSCertFile *string `restricted:"true"` - TLSKeyFile *string `restricted:"true"` - TLSMinVer *string `restricted:"true"` - TLSStrictTransport *bool `restricted:"true"` - TLSStrictTransportMaxAge *int64 `restricted:"true"` - TLSOverwriteCiphers []string `restricted:"true"` - UseLetsEncrypt *bool `restricted:"true"` - LetsEncryptCertificateCacheFile *string `restricted:"true"` - Forward80To443 *bool `restricted:"true"` - TrustedProxyIPHeader []string `restricted:"true"` - ReadTimeout *int `restricted:"true"` - WriteTimeout *int `restricted:"true"` - IdleTimeout *int `restricted:"true"` - MaximumLoginAttempts *int `restricted:"true"` - GoroutineHealthThreshold *int `restricted:"true"` - GoogleDeveloperKey *string `restricted:"true"` - EnableOAuthServiceProvider *bool - EnableIncomingWebhooks *bool - EnableOutgoingWebhooks *bool - EnableCommands *bool - DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used. 
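The ServerTLSSupportedCiphers map deleted above exists to translate cipher-suite names from configuration into the uint16 IDs that crypto/tls expects. A short sketch of that lookup in use, with a two-entry illustrative table rather than the full one:

package main

import (
	"crypto/tls"
	"fmt"
)

// Illustrative subset of the name-to-ID table from the deleted code.
var supportedCiphers = map[string]uint16{
	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
}

// cipherIDs resolves configured names (e.g. TLSOverwriteCiphers) to IDs,
// rejecting anything outside the supported table.
func cipherIDs(names []string) ([]uint16, error) {
	ids := make([]uint16, 0, len(names))
	for _, n := range names {
		id, ok := supportedCiphers[n]
		if !ok {
			return nil, fmt.Errorf("unsupported cipher: %s", n)
		}
		ids = append(ids, id)
	}
	return ids, nil
}

func main() {
	ids, err := cipherIDs([]string{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"})
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{CipherSuites: ids, MinVersion: tls.VersionTLS12}
	fmt.Printf("configured %d cipher suite(s)\n", len(cfg.CipherSuites))
}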
- EnablePostUsernameOverride *bool - EnablePostIconOverride *bool - EnableLinkPreviews *bool - EnableTesting *bool `restricted:"true"` - EnableDeveloper *bool `restricted:"true"` - EnableOpenTracing *bool `restricted:"true"` - EnableSecurityFixAlert *bool `restricted:"true"` - EnableInsecureOutgoingConnections *bool `restricted:"true"` - AllowedUntrustedInternalConnections *string `restricted:"true"` - EnableMultifactorAuthentication *bool - EnforceMultifactorAuthentication *bool - EnableUserAccessTokens *bool - AllowCorsFrom *string `restricted:"true"` - CorsExposedHeaders *string `restricted:"true"` - CorsAllowCredentials *bool `restricted:"true"` - CorsDebug *bool `restricted:"true"` - AllowCookiesForSubdomains *bool `restricted:"true"` - ExtendSessionLengthWithActivity *bool `restricted:"true"` - SessionLengthWebInDays *int `restricted:"true"` - SessionLengthMobileInDays *int `restricted:"true"` - SessionLengthSSOInDays *int `restricted:"true"` - SessionCacheInMinutes *int `restricted:"true"` - SessionIdleTimeoutInMinutes *int `restricted:"true"` - WebsocketSecurePort *int `restricted:"true"` - WebsocketPort *int `restricted:"true"` - WebserverMode *string `restricted:"true"` - EnableCustomEmoji *bool - EnableEmojiPicker *bool - EnableGifPicker *bool - GfycatApiKey *string - GfycatApiSecret *string - DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used. - PostEditTimeLimit *int - TimeBetweenUserTypingUpdatesMilliseconds *int64 `restricted:"true"` - EnablePostSearch *bool `restricted:"true"` - MinimumHashtagLength *int `restricted:"true"` - EnableUserTypingMessages *bool `restricted:"true"` - EnableChannelViewedMessages *bool `restricted:"true"` - EnableUserStatuses *bool `restricted:"true"` - ExperimentalEnableAuthenticationTransfer *bool `restricted:"true"` - ClusterLogTimeoutMilliseconds *int `restricted:"true"` - CloseUnusedDirectMessages *bool - EnablePreviewFeatures *bool - EnableTutorial *bool - ExperimentalEnableDefaultChannelLeaveJoinMessages *bool - ExperimentalGroupUnreadChannels *string - ExperimentalChannelOrganization *bool - ExperimentalChannelSidebarOrganization *string - ExperimentalDataPrefetch *bool - DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used. 
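The DEPRECATED_DO_NOT_USE_* fields in this struct show how a field can be renamed in Go without touching stored configuration: the json and mapstructure tags pin the original wire name. A stand-alone illustration with a hypothetical one-field struct (not the real ServiceSettings):

package main

import (
	"encoding/json"
	"fmt"
)

// One deprecated field, renamed in Go but still serialized under its old key.
type serviceSettings struct {
	DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations"`
}

func main() {
	v := true
	out, err := json.Marshal(serviceSettings{DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations: &v})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"EnableOnlyAdminIntegrations":true}
}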
- EnableAPITeamDeletion *bool - ExperimentalEnableHardenedMode *bool - DisableLegacyMFA *bool `restricted:"true"` - ExperimentalStrictCSRFEnforcement *bool `restricted:"true"` - EnableEmailInvitations *bool - DisableBotsWhenOwnerIsDeactivated *bool `restricted:"true"` - EnableBotAccountCreation *bool - EnableSVGs *bool - EnableLatex *bool - EnableLocalMode *bool - LocalModeSocketLocation *string -} - -func (s *ServiceSettings) SetDefaults(isUpdate bool) { - if s.EnableEmailInvitations == nil { - // If the site URL is also not present then assume this is a clean install - if s.SiteURL == nil { - s.EnableEmailInvitations = NewBool(false) - } else { - s.EnableEmailInvitations = NewBool(true) - } - } - - if s.SiteURL == nil { - if s.EnableDeveloper != nil && *s.EnableDeveloper { - s.SiteURL = NewString(SERVICE_SETTINGS_DEFAULT_SITE_URL) - } else { - s.SiteURL = NewString("") - } - } - - if s.WebsocketURL == nil { - s.WebsocketURL = NewString("") - } - - if s.LicenseFileLocation == nil { - s.LicenseFileLocation = NewString("") - } - - if s.ListenAddress == nil { - s.ListenAddress = NewString(SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS) - } - - if s.EnableLinkPreviews == nil { - s.EnableLinkPreviews = NewBool(true) - } - - if s.EnableTesting == nil { - s.EnableTesting = NewBool(false) - } - - if s.EnableDeveloper == nil { - s.EnableDeveloper = NewBool(false) - } - - if s.EnableOpenTracing == nil { - s.EnableOpenTracing = NewBool(false) - } - - if s.EnableSecurityFixAlert == nil { - s.EnableSecurityFixAlert = NewBool(true) - } - - if s.EnableInsecureOutgoingConnections == nil { - s.EnableInsecureOutgoingConnections = NewBool(false) - } - - if s.AllowedUntrustedInternalConnections == nil { - s.AllowedUntrustedInternalConnections = NewString("") - } - - if s.EnableMultifactorAuthentication == nil { - s.EnableMultifactorAuthentication = NewBool(false) - } - - if s.EnforceMultifactorAuthentication == nil { - s.EnforceMultifactorAuthentication = NewBool(false) - } - - if s.EnableUserAccessTokens == nil { - s.EnableUserAccessTokens = NewBool(false) - } - - if s.GoroutineHealthThreshold == nil { - s.GoroutineHealthThreshold = NewInt(-1) - } - - if s.GoogleDeveloperKey == nil { - s.GoogleDeveloperKey = NewString("") - } - - if s.EnableOAuthServiceProvider == nil { - s.EnableOAuthServiceProvider = NewBool(false) - } - - if s.EnableIncomingWebhooks == nil { - s.EnableIncomingWebhooks = NewBool(true) - } - - if s.EnableOutgoingWebhooks == nil { - s.EnableOutgoingWebhooks = NewBool(true) - } - - if s.ConnectionSecurity == nil { - s.ConnectionSecurity = NewString("") - } - - if s.TLSKeyFile == nil { - s.TLSKeyFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE) - } - - if s.TLSCertFile == nil { - s.TLSCertFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE) - } - - if s.TLSMinVer == nil { - s.TLSMinVer = NewString("1.2") - } - - if s.TLSStrictTransport == nil { - s.TLSStrictTransport = NewBool(false) - } - - if s.TLSStrictTransportMaxAge == nil { - s.TLSStrictTransportMaxAge = NewInt64(63072000) - } - - if s.TLSOverwriteCiphers == nil { - s.TLSOverwriteCiphers = []string{} - } - - if s.UseLetsEncrypt == nil { - s.UseLetsEncrypt = NewBool(false) - } - - if s.LetsEncryptCertificateCacheFile == nil { - s.LetsEncryptCertificateCacheFile = NewString("./config/letsencrypt.cache") - } - - if s.ReadTimeout == nil { - s.ReadTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT) - } - - if s.WriteTimeout == nil { - s.WriteTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT) - } - - if s.IdleTimeout == nil { 
- s.IdleTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_IDLE_TIMEOUT) - } - - if s.MaximumLoginAttempts == nil { - s.MaximumLoginAttempts = NewInt(SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS) - } - - if s.Forward80To443 == nil { - s.Forward80To443 = NewBool(false) - } - - if isUpdate { - // When updating an existing configuration, ensure that defaults are set. - if s.TrustedProxyIPHeader == nil { - s.TrustedProxyIPHeader = []string{HEADER_FORWARDED, HEADER_REAL_IP} - } - } else { - // When generating a blank configuration, leave the list empty. - s.TrustedProxyIPHeader = []string{} - } - - if s.TimeBetweenUserTypingUpdatesMilliseconds == nil { - s.TimeBetweenUserTypingUpdatesMilliseconds = NewInt64(5000) - } - - if s.EnablePostSearch == nil { - s.EnablePostSearch = NewBool(true) - } - - if s.MinimumHashtagLength == nil { - s.MinimumHashtagLength = NewInt(3) - } - - if s.EnableUserTypingMessages == nil { - s.EnableUserTypingMessages = NewBool(true) - } - - if s.EnableChannelViewedMessages == nil { - s.EnableChannelViewedMessages = NewBool(true) - } - - if s.EnableUserStatuses == nil { - s.EnableUserStatuses = NewBool(true) - } - - if s.ClusterLogTimeoutMilliseconds == nil { - s.ClusterLogTimeoutMilliseconds = NewInt(2000) - } - - if s.CloseUnusedDirectMessages == nil { - s.CloseUnusedDirectMessages = NewBool(false) - } - - if s.EnableTutorial == nil { - s.EnableTutorial = NewBool(true) - } - - // Must be manually enabled for existing installations. - if s.ExtendSessionLengthWithActivity == nil { - s.ExtendSessionLengthWithActivity = NewBool(!isUpdate) - } - - if s.SessionLengthWebInDays == nil { - if isUpdate { - s.SessionLengthWebInDays = NewInt(180) - } else { - s.SessionLengthWebInDays = NewInt(30) - } - } - - if s.SessionLengthMobileInDays == nil { - if isUpdate { - s.SessionLengthMobileInDays = NewInt(180) - } else { - s.SessionLengthMobileInDays = NewInt(30) - } - } - - if s.SessionLengthSSOInDays == nil { - s.SessionLengthSSOInDays = NewInt(30) - } - - if s.SessionCacheInMinutes == nil { - s.SessionCacheInMinutes = NewInt(10) - } - - if s.SessionIdleTimeoutInMinutes == nil { - s.SessionIdleTimeoutInMinutes = NewInt(43200) - } - - if s.EnableCommands == nil { - s.EnableCommands = NewBool(true) - } - - if s.DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations == nil { - s.DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations = NewBool(true) - } - - if s.EnablePostUsernameOverride == nil { - s.EnablePostUsernameOverride = NewBool(false) - } - - if s.EnablePostIconOverride == nil { - s.EnablePostIconOverride = NewBool(false) - } - - if s.WebsocketPort == nil { - s.WebsocketPort = NewInt(80) - } - - if s.WebsocketSecurePort == nil { - s.WebsocketSecurePort = NewInt(443) - } - - if s.AllowCorsFrom == nil { - s.AllowCorsFrom = NewString(SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM) - } - - if s.CorsExposedHeaders == nil { - s.CorsExposedHeaders = NewString("") - } - - if s.CorsAllowCredentials == nil { - s.CorsAllowCredentials = NewBool(false) - } - - if s.CorsDebug == nil { - s.CorsDebug = NewBool(false) - } - - if s.AllowCookiesForSubdomains == nil { - s.AllowCookiesForSubdomains = NewBool(false) - } - - if s.WebserverMode == nil { - s.WebserverMode = NewString("gzip") - } else if *s.WebserverMode == "regular" { - *s.WebserverMode = "gzip" - } - - if s.EnableCustomEmoji == nil { - s.EnableCustomEmoji = NewBool(false) - } - - if s.EnableEmojiPicker == nil { - s.EnableEmojiPicker = NewBool(true) - } - - if s.EnableGifPicker == nil { - s.EnableGifPicker = NewBool(false) - } - - if s.GfycatApiKey == nil || 
*s.GfycatApiKey == "" { - s.GfycatApiKey = NewString(SERVICE_SETTINGS_DEFAULT_GFYCAT_API_KEY) - } - - if s.GfycatApiSecret == nil || *s.GfycatApiSecret == "" { - s.GfycatApiSecret = NewString(SERVICE_SETTINGS_DEFAULT_GFYCAT_API_SECRET) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation == nil { - s.DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation = NewString(RESTRICT_EMOJI_CREATION_ALL) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPostDelete == nil { - s.DEPRECATED_DO_NOT_USE_RestrictPostDelete = NewString(PERMISSIONS_DELETE_POST_ALL) - } - - if s.DEPRECATED_DO_NOT_USE_AllowEditPost == nil { - s.DEPRECATED_DO_NOT_USE_AllowEditPost = NewString(ALLOW_EDIT_POST_ALWAYS) - } - - if s.ExperimentalEnableAuthenticationTransfer == nil { - s.ExperimentalEnableAuthenticationTransfer = NewBool(true) - } - - if s.PostEditTimeLimit == nil { - s.PostEditTimeLimit = NewInt(-1) - } - - if s.EnablePreviewFeatures == nil { - s.EnablePreviewFeatures = NewBool(true) - } - - if s.ExperimentalEnableDefaultChannelLeaveJoinMessages == nil { - s.ExperimentalEnableDefaultChannelLeaveJoinMessages = NewBool(true) - } - - if s.ExperimentalGroupUnreadChannels == nil { - s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED) - } else if *s.ExperimentalGroupUnreadChannels == "0" { - s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED) - } else if *s.ExperimentalGroupUnreadChannels == "1" { - s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DEFAULT_ON) - } - - if s.ExperimentalChannelOrganization == nil { - experimentalUnreadEnabled := *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED - s.ExperimentalChannelOrganization = NewBool(experimentalUnreadEnabled) - } - - if s.ExperimentalChannelSidebarOrganization == nil { - s.ExperimentalChannelSidebarOrganization = NewString("disabled") - } - - if s.ExperimentalDataPrefetch == nil { - s.ExperimentalDataPrefetch = NewBool(true) - } - - if s.DEPRECATED_DO_NOT_USE_ImageProxyType == nil { - s.DEPRECATED_DO_NOT_USE_ImageProxyType = NewString("") - } - - if s.DEPRECATED_DO_NOT_USE_ImageProxyURL == nil { - s.DEPRECATED_DO_NOT_USE_ImageProxyURL = NewString("") - } - - if s.DEPRECATED_DO_NOT_USE_ImageProxyOptions == nil { - s.DEPRECATED_DO_NOT_USE_ImageProxyOptions = NewString("") - } - - if s.EnableAPITeamDeletion == nil { - s.EnableAPITeamDeletion = NewBool(false) - } - - if s.ExperimentalEnableHardenedMode == nil { - s.ExperimentalEnableHardenedMode = NewBool(false) - } - - if s.DisableLegacyMFA == nil { - s.DisableLegacyMFA = NewBool(!isUpdate) - } - - if s.ExperimentalStrictCSRFEnforcement == nil { - s.ExperimentalStrictCSRFEnforcement = NewBool(false) - } - - if s.DisableBotsWhenOwnerIsDeactivated == nil { - s.DisableBotsWhenOwnerIsDeactivated = NewBool(true) - } - - if s.EnableBotAccountCreation == nil { - s.EnableBotAccountCreation = NewBool(false) - } - - if s.EnableSVGs == nil { - if isUpdate { - s.EnableSVGs = NewBool(true) - } else { - s.EnableSVGs = NewBool(false) - } - } - - if s.EnableLatex == nil { - if isUpdate { - s.EnableLatex = NewBool(true) - } else { - s.EnableLatex = NewBool(false) - } - } - - if s.EnableLocalMode == nil { - s.EnableLocalMode = NewBool(false) - } - - if s.LocalModeSocketLocation == nil { - s.LocalModeSocketLocation = NewString(LOCAL_MODE_SOCKET_PATH) - } -} - -type ClusterSettings struct { - Enable *bool `restricted:"true"` - ClusterName *string `restricted:"true"` - OverrideHostname *string `restricted:"true"` - NetworkInterface *string 
`restricted:"true"` - BindAddress *string `restricted:"true"` - AdvertiseAddress *string `restricted:"true"` - UseIpAddress *bool `restricted:"true"` - UseExperimentalGossip *bool `restricted:"true"` - EnableExperimentalGossipEncryption *bool `restricted:"true"` - ReadOnlyConfig *bool `restricted:"true"` - GossipPort *int `restricted:"true"` - StreamingPort *int `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - MaxIdleConnsPerHost *int `restricted:"true"` - IdleConnTimeoutMilliseconds *int `restricted:"true"` -} - -func (s *ClusterSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.ClusterName == nil { - s.ClusterName = NewString("") - } - - if s.OverrideHostname == nil { - s.OverrideHostname = NewString("") - } - - if s.NetworkInterface == nil { - s.NetworkInterface = NewString("") - } - - if s.BindAddress == nil { - s.BindAddress = NewString("") - } - - if s.AdvertiseAddress == nil { - s.AdvertiseAddress = NewString("") - } - - if s.UseIpAddress == nil { - s.UseIpAddress = NewBool(true) - } - - if s.UseExperimentalGossip == nil { - s.UseExperimentalGossip = NewBool(false) - } - - if s.EnableExperimentalGossipEncryption == nil { - s.EnableExperimentalGossipEncryption = NewBool(false) - } - - if s.ReadOnlyConfig == nil { - s.ReadOnlyConfig = NewBool(true) - } - - if s.GossipPort == nil { - s.GossipPort = NewInt(8074) - } - - if s.StreamingPort == nil { - s.StreamingPort = NewInt(8075) - } - - if s.MaxIdleConns == nil { - s.MaxIdleConns = NewInt(100) - } - - if s.MaxIdleConnsPerHost == nil { - s.MaxIdleConnsPerHost = NewInt(128) - } - - if s.IdleConnTimeoutMilliseconds == nil { - s.IdleConnTimeoutMilliseconds = NewInt(90000) - } -} - -type MetricsSettings struct { - Enable *bool `restricted:"true"` - BlockProfileRate *int `restricted:"true"` - ListenAddress *string `restricted:"true"` -} - -func (s *MetricsSettings) SetDefaults() { - if s.ListenAddress == nil { - s.ListenAddress = NewString(":8067") - } - - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.BlockProfileRate == nil { - s.BlockProfileRate = NewInt(0) - } -} - -type ExperimentalSettings struct { - ClientSideCertEnable *bool - ClientSideCertCheck *string - EnableClickToReply *bool `restricted:"true"` - LinkMetadataTimeoutMilliseconds *int64 `restricted:"true"` - RestrictSystemAdmin *bool `restricted:"true"` - UseNewSAMLLibrary *bool -} - -func (s *ExperimentalSettings) SetDefaults() { - if s.ClientSideCertEnable == nil { - s.ClientSideCertEnable = NewBool(false) - } - - if s.ClientSideCertCheck == nil { - s.ClientSideCertCheck = NewString(CLIENT_SIDE_CERT_CHECK_SECONDARY_AUTH) - } - - if s.EnableClickToReply == nil { - s.EnableClickToReply = NewBool(false) - } - - if s.LinkMetadataTimeoutMilliseconds == nil { - s.LinkMetadataTimeoutMilliseconds = NewInt64(EXPERIMENTAL_SETTINGS_DEFAULT_LINK_METADATA_TIMEOUT_MILLISECONDS) - } - - if s.RestrictSystemAdmin == nil { - s.RestrictSystemAdmin = NewBool(false) - } - if s.UseNewSAMLLibrary == nil { - s.UseNewSAMLLibrary = NewBool(false) - } -} - -type AnalyticsSettings struct { - MaxUsersForStatistics *int `restricted:"true"` -} - -func (s *AnalyticsSettings) SetDefaults() { - if s.MaxUsersForStatistics == nil { - s.MaxUsersForStatistics = NewInt(ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS) - } -} - -type SSOSettings struct { - Enable *bool - Secret *string - Id *string - Scope *string - AuthEndpoint *string - TokenEndpoint *string - UserApiEndpoint *string -} - -func (s *SSOSettings) setDefaults(scope, authEndpoint, 
tokenEndpoint, userApiEndpoint string) { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.Secret == nil { - s.Secret = NewString("") - } - - if s.Id == nil { - s.Id = NewString("") - } - - if s.Scope == nil { - s.Scope = NewString(scope) - } - - if s.AuthEndpoint == nil { - s.AuthEndpoint = NewString(authEndpoint) - } - - if s.TokenEndpoint == nil { - s.TokenEndpoint = NewString(tokenEndpoint) - } - - if s.UserApiEndpoint == nil { - s.UserApiEndpoint = NewString(userApiEndpoint) - } -} - -type Office365Settings struct { - Enable *bool - Secret *string - Id *string - Scope *string - AuthEndpoint *string - TokenEndpoint *string - UserApiEndpoint *string - DirectoryId *string -} - -func (s *Office365Settings) setDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.Id == nil { - s.Id = NewString("") - } - - if s.Secret == nil { - s.Secret = NewString("") - } - - if s.Scope == nil { - s.Scope = NewString(OFFICE365_SETTINGS_DEFAULT_SCOPE) - } - - if s.AuthEndpoint == nil { - s.AuthEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_AUTH_ENDPOINT) - } - - if s.TokenEndpoint == nil { - s.TokenEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_TOKEN_ENDPOINT) - } - - if s.UserApiEndpoint == nil { - s.UserApiEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_USER_API_ENDPOINT) - } - - if s.DirectoryId == nil { - s.DirectoryId = NewString("") - } -} - -func (s *Office365Settings) SSOSettings() *SSOSettings { - ssoSettings := SSOSettings{} - ssoSettings.Enable = s.Enable - ssoSettings.Secret = s.Secret - ssoSettings.Id = s.Id - ssoSettings.Scope = s.Scope - ssoSettings.AuthEndpoint = s.AuthEndpoint - ssoSettings.TokenEndpoint = s.TokenEndpoint - ssoSettings.UserApiEndpoint = s.UserApiEndpoint - return &ssoSettings -} - -type SqlSettings struct { - DriverName *string `restricted:"true"` - DataSource *string `restricted:"true"` - DataSourceReplicas []string `restricted:"true"` - DataSourceSearchReplicas []string `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - ConnMaxLifetimeMilliseconds *int `restricted:"true"` - MaxOpenConns *int `restricted:"true"` - Trace *bool `restricted:"true"` - AtRestEncryptKey *string `restricted:"true"` - QueryTimeout *int `restricted:"true"` - DisableDatabaseSearch *bool `restricted:"true"` -} - -func (s *SqlSettings) SetDefaults(isUpdate bool) { - if s.DriverName == nil { - s.DriverName = NewString(DATABASE_DRIVER_MYSQL) - } - - if s.DataSource == nil { - s.DataSource = NewString(SQL_SETTINGS_DEFAULT_DATA_SOURCE) - } - - if s.DataSourceReplicas == nil { - s.DataSourceReplicas = []string{} - } - - if s.DataSourceSearchReplicas == nil { - s.DataSourceSearchReplicas = []string{} - } - - if isUpdate { - // When updating an existing configuration, ensure an encryption key has been specified. - if s.AtRestEncryptKey == nil || len(*s.AtRestEncryptKey) == 0 { - s.AtRestEncryptKey = NewString(NewRandomString(32)) - } - } else { - // When generating a blank configuration, leave this key empty to be generated on server start. 
- s.AtRestEncryptKey = NewString("") - } - - if s.MaxIdleConns == nil { - s.MaxIdleConns = NewInt(20) - } - - if s.MaxOpenConns == nil { - s.MaxOpenConns = NewInt(300) - } - - if s.ConnMaxLifetimeMilliseconds == nil { - s.ConnMaxLifetimeMilliseconds = NewInt(3600000) - } - - if s.Trace == nil { - s.Trace = NewBool(false) - } - - if s.QueryTimeout == nil { - s.QueryTimeout = NewInt(30) - } - - if s.DisableDatabaseSearch == nil { - s.DisableDatabaseSearch = NewBool(false) - } -} - -type LogSettings struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` - EnableWebhookDebugging *bool `restricted:"true"` - EnableDiagnostics *bool `restricted:"true"` - EnableSentry *bool `restricted:"true"` - AdvancedLoggingConfig *string `restricted:"true"` -} - -func (s *LogSettings) SetDefaults() { - if s.EnableConsole == nil { - s.EnableConsole = NewBool(true) - } - - if s.ConsoleLevel == nil { - s.ConsoleLevel = NewString("DEBUG") - } - - if s.EnableFile == nil { - s.EnableFile = NewBool(true) - } - - if s.FileLevel == nil { - s.FileLevel = NewString("INFO") - } - - if s.FileLocation == nil { - s.FileLocation = NewString("") - } - - if s.EnableWebhookDebugging == nil { - s.EnableWebhookDebugging = NewBool(true) - } - - if s.EnableDiagnostics == nil { - s.EnableDiagnostics = NewBool(true) - } - - if s.EnableSentry == nil { - s.EnableSentry = NewBool(*s.EnableDiagnostics) - } - - if s.ConsoleJson == nil { - s.ConsoleJson = NewBool(true) - } - - if s.FileJson == nil { - s.FileJson = NewBool(true) - } - - if s.AdvancedLoggingConfig == nil { - s.AdvancedLoggingConfig = NewString("") - } -} - -type ExperimentalAuditSettings struct { - SysLogEnabled *bool `restricted:"true"` - SysLogIP *string `restricted:"true"` - SysLogPort *int `restricted:"true"` - SysLogTag *string `restricted:"true"` - SysLogCert *string `restricted:"true"` - SysLogInsecure *bool `restricted:"true"` - SysLogMaxQueueSize *int `restricted:"true"` - - FileEnabled *bool `restricted:"true"` - FileName *string `restricted:"true"` - FileMaxSizeMB *int `restricted:"true"` - FileMaxAgeDays *int `restricted:"true"` - FileMaxBackups *int `restricted:"true"` - FileCompress *bool `restricted:"true"` - FileMaxQueueSize *int `restricted:"true"` -} - -func (s *ExperimentalAuditSettings) SetDefaults() { - if s.SysLogEnabled == nil { - s.SysLogEnabled = NewBool(false) - } - - if s.SysLogIP == nil { - s.SysLogIP = NewString("localhost") - } - - if s.SysLogPort == nil { - s.SysLogPort = NewInt(6514) - } - - if s.SysLogTag == nil { - s.SysLogTag = NewString("") - } - - if s.SysLogCert == nil { - s.SysLogCert = NewString("") - } - - if s.SysLogInsecure == nil { - s.SysLogInsecure = NewBool(false) - } - - if s.SysLogMaxQueueSize == nil { - s.SysLogMaxQueueSize = NewInt(1000) - } - - if s.FileEnabled == nil { - s.FileEnabled = NewBool(false) - } - - if s.FileName == nil { - s.FileName = NewString("") - } - - if s.FileMaxSizeMB == nil { - s.FileMaxSizeMB = NewInt(100) - } - - if s.FileMaxAgeDays == nil { - s.FileMaxAgeDays = NewInt(0) // no limit on age - } - - if s.FileMaxBackups == nil { // no limit on number of backups - s.FileMaxBackups = NewInt(0) - } - - if s.FileCompress == nil { - s.FileCompress = NewBool(false) - } - - if s.FileMaxQueueSize == nil { - s.FileMaxQueueSize = NewInt(1000) - } -} - -type NotificationLogSettings 
struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` -} - -func (s *NotificationLogSettings) SetDefaults() { - if s.EnableConsole == nil { - s.EnableConsole = NewBool(true) - } - - if s.ConsoleLevel == nil { - s.ConsoleLevel = NewString("DEBUG") - } - - if s.EnableFile == nil { - s.EnableFile = NewBool(true) - } - - if s.FileLevel == nil { - s.FileLevel = NewString("INFO") - } - - if s.FileLocation == nil { - s.FileLocation = NewString("") - } - - if s.ConsoleJson == nil { - s.ConsoleJson = NewBool(true) - } - - if s.FileJson == nil { - s.FileJson = NewBool(true) - } -} - -type PasswordSettings struct { - MinimumLength *int - Lowercase *bool - Number *bool - Uppercase *bool - Symbol *bool -} - -func (s *PasswordSettings) SetDefaults() { - if s.MinimumLength == nil { - s.MinimumLength = NewInt(10) - } - - if s.Lowercase == nil { - s.Lowercase = NewBool(true) - } - - if s.Number == nil { - s.Number = NewBool(true) - } - - if s.Uppercase == nil { - s.Uppercase = NewBool(true) - } - - if s.Symbol == nil { - s.Symbol = NewBool(true) - } -} - -type FileSettings struct { - EnableFileAttachments *bool - EnableMobileUpload *bool - EnableMobileDownload *bool - MaxFileSize *int64 - DriverName *string `restricted:"true"` - Directory *string `restricted:"true"` - EnablePublicLink *bool - PublicLinkSalt *string - InitialFont *string - AmazonS3AccessKeyId *string `restricted:"true"` - AmazonS3SecretAccessKey *string `restricted:"true"` - AmazonS3Bucket *string `restricted:"true"` - AmazonS3PathPrefix *string `restricted:"true"` - AmazonS3Region *string `restricted:"true"` - AmazonS3Endpoint *string `restricted:"true"` - AmazonS3SSL *bool `restricted:"true"` - AmazonS3SignV2 *bool `restricted:"true"` - AmazonS3SSE *bool `restricted:"true"` - AmazonS3Trace *bool `restricted:"true"` -} - -func (s *FileSettings) SetDefaults(isUpdate bool) { - if s.EnableFileAttachments == nil { - s.EnableFileAttachments = NewBool(true) - } - - if s.EnableMobileUpload == nil { - s.EnableMobileUpload = NewBool(true) - } - - if s.EnableMobileDownload == nil { - s.EnableMobileDownload = NewBool(true) - } - - if s.MaxFileSize == nil { - s.MaxFileSize = NewInt64(52428800) // 50 MB - } - - if s.DriverName == nil { - s.DriverName = NewString(IMAGE_DRIVER_LOCAL) - } - - if s.Directory == nil || *s.Directory == "" { - s.Directory = NewString(FILE_SETTINGS_DEFAULT_DIRECTORY) - } - - if s.EnablePublicLink == nil { - s.EnablePublicLink = NewBool(false) - } - - if isUpdate { - // When updating an existing configuration, ensure link salt has been specified. - if s.PublicLinkSalt == nil || len(*s.PublicLinkSalt) == 0 { - s.PublicLinkSalt = NewString(NewRandomString(32)) - } - } else { - // When generating a blank configuration, leave link salt empty to be generated on server start. 
- s.PublicLinkSalt = NewString("") - } - - if s.InitialFont == nil { - // Defaults to "nunito-bold.ttf" - s.InitialFont = NewString("nunito-bold.ttf") - } - - if s.AmazonS3AccessKeyId == nil { - s.AmazonS3AccessKeyId = NewString("") - } - - if s.AmazonS3SecretAccessKey == nil { - s.AmazonS3SecretAccessKey = NewString("") - } - - if s.AmazonS3Bucket == nil { - s.AmazonS3Bucket = NewString("") - } - - if s.AmazonS3PathPrefix == nil { - s.AmazonS3PathPrefix = NewString("") - } - - if s.AmazonS3Region == nil { - s.AmazonS3Region = NewString("") - } - - if s.AmazonS3Endpoint == nil || len(*s.AmazonS3Endpoint) == 0 { - // Defaults to "s3.amazonaws.com" - s.AmazonS3Endpoint = NewString("s3.amazonaws.com") - } - - if s.AmazonS3SSL == nil { - s.AmazonS3SSL = NewBool(true) // Secure by default. - } - - if s.AmazonS3SignV2 == nil { - s.AmazonS3SignV2 = new(bool) - // Signature v2 is not enabled by default. - } - - if s.AmazonS3SSE == nil { - s.AmazonS3SSE = NewBool(false) // Not Encrypted by default. - } - - if s.AmazonS3Trace == nil { - s.AmazonS3Trace = NewBool(false) - } -} - -type EmailSettings struct { - EnableSignUpWithEmail *bool - EnableSignInWithEmail *bool - EnableSignInWithUsername *bool - SendEmailNotifications *bool - UseChannelInEmailNotifications *bool - RequireEmailVerification *bool - FeedbackName *string - FeedbackEmail *string - ReplyToAddress *string - FeedbackOrganization *string - EnableSMTPAuth *bool `restricted:"true"` - SMTPUsername *string `restricted:"true"` - SMTPPassword *string `restricted:"true"` - SMTPServer *string `restricted:"true"` - SMTPPort *string `restricted:"true"` - SMTPServerTimeout *int - ConnectionSecurity *string `restricted:"true"` - SendPushNotifications *bool - PushNotificationServer *string - PushNotificationContents *string - PushNotificationBuffer *int - EnableEmailBatching *bool - EmailBatchingBufferSize *int - EmailBatchingInterval *int - EnablePreviewModeBanner *bool - SkipServerCertificateVerification *bool `restricted:"true"` - EmailNotificationContentsType *string - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string -} - -func (s *EmailSettings) SetDefaults(isUpdate bool) { - if s.EnableSignUpWithEmail == nil { - s.EnableSignUpWithEmail = NewBool(true) - } - - if s.EnableSignInWithEmail == nil { - s.EnableSignInWithEmail = NewBool(*s.EnableSignUpWithEmail) - } - - if s.EnableSignInWithUsername == nil { - s.EnableSignInWithUsername = NewBool(true) - } - - if s.SendEmailNotifications == nil { - s.SendEmailNotifications = NewBool(true) - } - - if s.UseChannelInEmailNotifications == nil { - s.UseChannelInEmailNotifications = NewBool(false) - } - - if s.RequireEmailVerification == nil { - s.RequireEmailVerification = NewBool(false) - } - - if s.FeedbackName == nil { - s.FeedbackName = NewString("") - } - - if s.FeedbackEmail == nil { - s.FeedbackEmail = NewString("test@example.com") - } - - if s.ReplyToAddress == nil { - s.ReplyToAddress = NewString("test@example.com") - } - - if s.FeedbackOrganization == nil { - s.FeedbackOrganization = NewString(EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION) - } - - if s.EnableSMTPAuth == nil { - if s.ConnectionSecurity == nil || *s.ConnectionSecurity == CONN_SECURITY_NONE { - s.EnableSMTPAuth = NewBool(false) - } else { - s.EnableSMTPAuth = NewBool(true) - } - } - - if s.SMTPUsername == nil { - s.SMTPUsername = NewString("") - } - - if s.SMTPPassword == nil { - s.SMTPPassword = NewString("") - } - - if s.SMTPServer == nil || len(*s.SMTPServer) == 0 { - s.SMTPServer = 
NewString("localhost") - } - - if s.SMTPPort == nil || len(*s.SMTPPort) == 0 { - s.SMTPPort = NewString("10025") - } - - if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { - s.SMTPServerTimeout = NewInt(10) - } - - if s.ConnectionSecurity == nil || *s.ConnectionSecurity == CONN_SECURITY_PLAIN { - s.ConnectionSecurity = NewString(CONN_SECURITY_NONE) - } - - if s.SendPushNotifications == nil { - s.SendPushNotifications = NewBool(!isUpdate) - } - - if s.PushNotificationServer == nil { - if isUpdate { - s.PushNotificationServer = NewString("") - } else { - s.PushNotificationServer = NewString(GENERIC_NOTIFICATION_SERVER) - } - } - - if s.PushNotificationContents == nil { - s.PushNotificationContents = NewString(FULL_NOTIFICATION) - } - - if s.PushNotificationBuffer == nil { - s.PushNotificationBuffer = NewInt(1000) - } - - if s.EnableEmailBatching == nil { - s.EnableEmailBatching = NewBool(false) - } - - if s.EmailBatchingBufferSize == nil { - s.EmailBatchingBufferSize = NewInt(EMAIL_BATCHING_BUFFER_SIZE) - } - - if s.EmailBatchingInterval == nil { - s.EmailBatchingInterval = NewInt(EMAIL_BATCHING_INTERVAL) - } - - if s.EnablePreviewModeBanner == nil { - s.EnablePreviewModeBanner = NewBool(true) - } - - if s.EnableSMTPAuth == nil { - if *s.ConnectionSecurity == CONN_SECURITY_NONE { - s.EnableSMTPAuth = NewBool(false) - } else { - s.EnableSMTPAuth = NewBool(true) - } - } - - if *s.ConnectionSecurity == CONN_SECURITY_PLAIN { - *s.ConnectionSecurity = CONN_SECURITY_NONE - } - - if s.SkipServerCertificateVerification == nil { - s.SkipServerCertificateVerification = NewBool(false) - } - - if s.EmailNotificationContentsType == nil { - s.EmailNotificationContentsType = NewString(EMAIL_NOTIFICATION_CONTENTS_FULL) - } - - if s.LoginButtonColor == nil { - s.LoginButtonColor = NewString("#0000") - } - - if s.LoginButtonBorderColor == nil { - s.LoginButtonBorderColor = NewString("#2389D7") - } - - if s.LoginButtonTextColor == nil { - s.LoginButtonTextColor = NewString("#2389D7") - } -} - -type RateLimitSettings struct { - Enable *bool `restricted:"true"` - PerSec *int `restricted:"true"` - MaxBurst *int `restricted:"true"` - MemoryStoreSize *int `restricted:"true"` - VaryByRemoteAddr *bool `restricted:"true"` - VaryByUser *bool `restricted:"true"` - VaryByHeader string `restricted:"true"` -} - -func (s *RateLimitSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.PerSec == nil { - s.PerSec = NewInt(10) - } - - if s.MaxBurst == nil { - s.MaxBurst = NewInt(100) - } - - if s.MemoryStoreSize == nil { - s.MemoryStoreSize = NewInt(10000) - } - - if s.VaryByRemoteAddr == nil { - s.VaryByRemoteAddr = NewBool(true) - } - - if s.VaryByUser == nil { - s.VaryByUser = NewBool(false) - } -} - -type PrivacySettings struct { - ShowEmailAddress *bool - ShowFullName *bool -} - -func (s *PrivacySettings) setDefaults() { - if s.ShowEmailAddress == nil { - s.ShowEmailAddress = NewBool(true) - } - - if s.ShowFullName == nil { - s.ShowFullName = NewBool(true) - } -} - -type SupportSettings struct { - TermsOfServiceLink *string `restricted:"true"` - PrivacyPolicyLink *string `restricted:"true"` - AboutLink *string `restricted:"true"` - HelpLink *string `restricted:"true"` - ReportAProblemLink *string `restricted:"true"` - SupportEmail *string - CustomTermsOfServiceEnabled *bool - CustomTermsOfServiceReAcceptancePeriod *int - EnableAskCommunityLink *bool -} - -func (s *SupportSettings) SetDefaults() { - if !IsSafeLink(s.TermsOfServiceLink) { - *s.TermsOfServiceLink = 
SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK - } - - if s.TermsOfServiceLink == nil { - s.TermsOfServiceLink = NewString(SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK) - } - - if !IsSafeLink(s.PrivacyPolicyLink) { - *s.PrivacyPolicyLink = "" - } - - if s.PrivacyPolicyLink == nil { - s.PrivacyPolicyLink = NewString(SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK) - } - - if !IsSafeLink(s.AboutLink) { - *s.AboutLink = "" - } - - if s.AboutLink == nil { - s.AboutLink = NewString(SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK) - } - - if !IsSafeLink(s.HelpLink) { - *s.HelpLink = "" - } - - if s.HelpLink == nil { - s.HelpLink = NewString(SUPPORT_SETTINGS_DEFAULT_HELP_LINK) - } - - if !IsSafeLink(s.ReportAProblemLink) { - *s.ReportAProblemLink = "" - } - - if s.ReportAProblemLink == nil { - s.ReportAProblemLink = NewString(SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK) - } - - if s.SupportEmail == nil { - s.SupportEmail = NewString(SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL) - } - - if s.CustomTermsOfServiceEnabled == nil { - s.CustomTermsOfServiceEnabled = NewBool(false) - } - - if s.CustomTermsOfServiceReAcceptancePeriod == nil { - s.CustomTermsOfServiceReAcceptancePeriod = NewInt(SUPPORT_SETTINGS_DEFAULT_RE_ACCEPTANCE_PERIOD) - } - - if s.EnableAskCommunityLink == nil { - s.EnableAskCommunityLink = NewBool(true) - } -} - -type AnnouncementSettings struct { - EnableBanner *bool - BannerText *string - BannerColor *string - BannerTextColor *string - AllowBannerDismissal *bool -} - -func (s *AnnouncementSettings) SetDefaults() { - if s.EnableBanner == nil { - s.EnableBanner = NewBool(false) - } - - if s.BannerText == nil { - s.BannerText = NewString("") - } - - if s.BannerColor == nil { - s.BannerColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR) - } - - if s.BannerTextColor == nil { - s.BannerTextColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR) - } - - if s.AllowBannerDismissal == nil { - s.AllowBannerDismissal = NewBool(true) - } -} - -type ThemeSettings struct { - EnableThemeSelection *bool - DefaultTheme *string - AllowCustomThemes *bool - AllowedThemes []string -} - -func (s *ThemeSettings) SetDefaults() { - if s.EnableThemeSelection == nil { - s.EnableThemeSelection = NewBool(true) - } - - if s.DefaultTheme == nil { - s.DefaultTheme = NewString(TEAM_SETTINGS_DEFAULT_TEAM_TEXT) - } - - if s.AllowCustomThemes == nil { - s.AllowCustomThemes = NewBool(true) - } - - if s.AllowedThemes == nil { - s.AllowedThemes = []string{} - } -} - -type TeamSettings struct { - SiteName *string - MaxUsersPerTeam *int - DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used. - EnableUserCreation *bool - EnableOpenServer *bool - EnableUserDeactivation *bool - RestrictCreationToDomains *string - EnableCustomBrand *bool - CustomBrandText *string - CustomDescriptionText *string - RestrictDirectMessage *string - DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used. 
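Every settings struct in this file relies on the idiom that TeamSettings.SetDefaults applies below: fields are pointers so that "absent from config.json" is distinguishable from an explicit zero value, and SetDefaults fills in only the nil ones. A compact stand-alone version of the idiom, with hypothetical NewBool/NewString helpers standing in for the model package's:

package main

import "fmt"

// Stand-ins for the model package's pointer helpers.
func NewBool(b bool) *bool       { return &b }
func NewString(s string) *string { return &s }

// Hypothetical settings block: nil means "unset", not false/empty.
type teamSettings struct {
	SiteName         *string
	EnableOpenServer *bool
}

// SetDefaults fills only the fields the config file left out.
func (s *teamSettings) SetDefaults() {
	if s.SiteName == nil || *s.SiteName == "" {
		s.SiteName = NewString("Mattermost")
	}
	if s.EnableOpenServer == nil {
		s.EnableOpenServer = NewBool(false)
	}
}

func main() {
	s := teamSettings{EnableOpenServer: NewBool(true)} // explicitly set value survives
	s.SetDefaults()
	fmt.Println(*s.SiteName, *s.EnableOpenServer) // Mattermost true
}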
- DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used. - EnableXToLeaveChannelsFromLHS *bool - UserStatusAwayTimeout *int64 - MaxChannelsPerTeam *int64 - MaxNotificationsPerChannel *int64 - EnableConfirmNotificationsToChannel *bool - TeammateNameDisplay *string - ExperimentalViewArchivedChannels *bool - ExperimentalEnableAutomaticReplies *bool - ExperimentalHideTownSquareinLHS *bool - ExperimentalTownSquareIsReadOnly *bool - LockTeammateNameDisplay *bool - ExperimentalPrimaryTeam *string - ExperimentalDefaultChannels []string -} - -func (s *TeamSettings) SetDefaults() { - - if s.SiteName == nil || *s.SiteName == "" { - s.SiteName = NewString(TEAM_SETTINGS_DEFAULT_SITE_NAME) - } - - if s.MaxUsersPerTeam == nil { - s.MaxUsersPerTeam = NewInt(TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM) - } - - if s.DEPRECATED_DO_NOT_USE_EnableTeamCreation == nil { - s.DEPRECATED_DO_NOT_USE_EnableTeamCreation = NewBool(true) - } - - if s.EnableUserCreation == nil { - s.EnableUserCreation = NewBool(true) - } - - if s.EnableOpenServer == nil { - s.EnableOpenServer = NewBool(false) - } - - if s.RestrictCreationToDomains == nil { - s.RestrictCreationToDomains = NewString("") - } - - if s.EnableCustomBrand == nil { - s.EnableCustomBrand = NewBool(false) - } - - if s.EnableUserDeactivation == nil { - s.EnableUserDeactivation = NewBool(false) - } - - if s.CustomBrandText == nil { - s.CustomBrandText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT) - } - - if s.CustomDescriptionText == nil { - s.CustomDescriptionText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT) - } - - if s.RestrictDirectMessage == nil { - s.RestrictDirectMessage = NewString(DIRECT_MESSAGE_ANY) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictTeamInvite == nil { - s.DEPRECATED_DO_NOT_USE_RestrictTeamInvite = NewString(PERMISSIONS_ALL) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement == nil { - s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement = NewString(PERMISSIONS_ALL) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement == nil { - s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement = NewString(PERMISSIONS_ALL) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation == nil { - s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation = new(string) - // If this setting does not exist, assume migration from <3.6, so use management setting as default. 
- if *s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement == PERMISSIONS_CHANNEL_ADMIN { - *s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation = PERMISSIONS_TEAM_ADMIN - } else { - *s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation = *s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement - } - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation == nil { - // If this setting does not exist, assume migration from <3.6, so use management setting as default. - if *s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement == PERMISSIONS_CHANNEL_ADMIN { - s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation = NewString(PERMISSIONS_TEAM_ADMIN) - } else { - s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation = NewString(*s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement) - } - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion == nil { - // If this setting does not exist, assume migration from <3.6, so use management setting as default. - s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion = NewString(*s.DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion == nil { - // If this setting does not exist, assume migration from <3.6, so use management setting as default. - s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion = NewString(*s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement) - } - - if s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers == nil { - s.DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers = NewString(PERMISSIONS_ALL) - } - - if s.EnableXToLeaveChannelsFromLHS == nil { - s.EnableXToLeaveChannelsFromLHS = NewBool(false) - } - - if s.UserStatusAwayTimeout == nil { - s.UserStatusAwayTimeout = NewInt64(TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT) - } - - if s.MaxChannelsPerTeam == nil { - s.MaxChannelsPerTeam = NewInt64(2000) - } - - if s.MaxNotificationsPerChannel == nil { - s.MaxNotificationsPerChannel = NewInt64(1000) - } - - if s.EnableConfirmNotificationsToChannel == nil { - s.EnableConfirmNotificationsToChannel = NewBool(true) - } - - if s.ExperimentalEnableAutomaticReplies == nil { - s.ExperimentalEnableAutomaticReplies = NewBool(false) - } - - if s.ExperimentalHideTownSquareinLHS == nil { - s.ExperimentalHideTownSquareinLHS = NewBool(false) - } - - if s.ExperimentalTownSquareIsReadOnly == nil { - s.ExperimentalTownSquareIsReadOnly = NewBool(false) - } - - if s.ExperimentalPrimaryTeam == nil { - s.ExperimentalPrimaryTeam = NewString("") - } - - if s.ExperimentalDefaultChannels == nil { - s.ExperimentalDefaultChannels = []string{} - } - - if s.DEPRECATED_DO_NOT_USE_EnableTeamCreation == nil { - s.DEPRECATED_DO_NOT_USE_EnableTeamCreation = NewBool(true) - } - - if s.EnableUserCreation == nil { - s.EnableUserCreation = NewBool(true) - } - - if s.ExperimentalViewArchivedChannels == nil { - s.ExperimentalViewArchivedChannels = NewBool(false) - } - - if s.LockTeammateNameDisplay == nil { - s.LockTeammateNameDisplay = NewBool(false) - } -} - -type ClientRequirements struct { - AndroidLatestVersion string `restricted:"true"` - AndroidMinVersion string `restricted:"true"` - DesktopLatestVersion string `restricted:"true"` - DesktopMinVersion string `restricted:"true"` - IosLatestVersion string `restricted:"true"` - IosMinVersion string `restricted:"true"` -} - -type LdapSettings struct { - // Basic - Enable *bool - EnableSync *bool - LdapServer *string - LdapPort *int - ConnectionSecurity *string - BaseDN *string - BindUsername *string - 
BindPassword *string - - // Filtering - UserFilter *string - GroupFilter *string - GuestFilter *string - EnableAdminFilter *bool - AdminFilter *string - - // Group Mapping - GroupDisplayNameAttribute *string - GroupIdAttribute *string - - // User Mapping - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - IdAttribute *string - PositionAttribute *string - LoginIdAttribute *string - PictureAttribute *string - - // Synchronization - SyncIntervalMinutes *int - - // Advanced - SkipCertificateVerification *bool - QueryTimeout *int - MaxPageSize *int - - // Customization - LoginFieldName *string - - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string - - Trace *bool -} - -func (s *LdapSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - // When unset should default to LDAP Enabled - if s.EnableSync == nil { - s.EnableSync = NewBool(*s.Enable) - } - - if s.EnableAdminFilter == nil { - s.EnableAdminFilter = NewBool(false) - } - - if s.LdapServer == nil { - s.LdapServer = NewString("") - } - - if s.LdapPort == nil { - s.LdapPort = NewInt(389) - } - - if s.ConnectionSecurity == nil { - s.ConnectionSecurity = NewString("") - } - - if s.BaseDN == nil { - s.BaseDN = NewString("") - } - - if s.BindUsername == nil { - s.BindUsername = NewString("") - } - - if s.BindPassword == nil { - s.BindPassword = NewString("") - } - - if s.UserFilter == nil { - s.UserFilter = NewString("") - } - - if s.GuestFilter == nil { - s.GuestFilter = NewString("") - } - - if s.AdminFilter == nil { - s.AdminFilter = NewString("") - } - - if s.GroupFilter == nil { - s.GroupFilter = NewString("") - } - - if s.GroupDisplayNameAttribute == nil { - s.GroupDisplayNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_GROUP_DISPLAY_NAME_ATTRIBUTE) - } - - if s.GroupIdAttribute == nil { - s.GroupIdAttribute = NewString(LDAP_SETTINGS_DEFAULT_GROUP_ID_ATTRIBUTE) - } - - if s.FirstNameAttribute == nil { - s.FirstNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) - } - - if s.LastNameAttribute == nil { - s.LastNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) - } - - if s.EmailAttribute == nil { - s.EmailAttribute = NewString(LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) - } - - if s.UsernameAttribute == nil { - s.UsernameAttribute = NewString(LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) - } - - if s.NicknameAttribute == nil { - s.NicknameAttribute = NewString(LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) - } - - if s.IdAttribute == nil { - s.IdAttribute = NewString(LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE) - } - - if s.PositionAttribute == nil { - s.PositionAttribute = NewString(LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) - } - - if s.PictureAttribute == nil { - s.PictureAttribute = NewString(LDAP_SETTINGS_DEFAULT_PICTURE_ATTRIBUTE) - } - - // For those upgrading to the version when LoginIdAttribute was added - // they need IdAttribute == LoginIdAttribute not to break - if s.LoginIdAttribute == nil { - s.LoginIdAttribute = s.IdAttribute - } - - if s.SyncIntervalMinutes == nil { - s.SyncIntervalMinutes = NewInt(60) - } - - if s.SkipCertificateVerification == nil { - s.SkipCertificateVerification = NewBool(false) - } - - if s.QueryTimeout == nil { - s.QueryTimeout = NewInt(60) - } - - if s.MaxPageSize == nil { - s.MaxPageSize = NewInt(0) - } - - if s.LoginFieldName == nil { - s.LoginFieldName = NewString(LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME) - } - - if s.LoginButtonColor == 
nil { - s.LoginButtonColor = NewString("#0000") - } - - if s.LoginButtonBorderColor == nil { - s.LoginButtonBorderColor = NewString("#2389D7") - } - - if s.LoginButtonTextColor == nil { - s.LoginButtonTextColor = NewString("#2389D7") - } - - if s.Trace == nil { - s.Trace = NewBool(false) - } -} - -type ComplianceSettings struct { - Enable *bool - Directory *string - EnableDaily *bool -} - -func (s *ComplianceSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.Directory == nil { - s.Directory = NewString("./data/") - } - - if s.EnableDaily == nil { - s.EnableDaily = NewBool(false) - } -} - -type LocalizationSettings struct { - DefaultServerLocale *string - DefaultClientLocale *string - AvailableLocales *string -} - -func (s *LocalizationSettings) SetDefaults() { - if s.DefaultServerLocale == nil { - s.DefaultServerLocale = NewString(DEFAULT_LOCALE) - } - - if s.DefaultClientLocale == nil { - s.DefaultClientLocale = NewString(DEFAULT_LOCALE) - } - - if s.AvailableLocales == nil { - s.AvailableLocales = NewString("") - } -} - -type SamlSettings struct { - // Basic - Enable *bool - EnableSyncWithLdap *bool - EnableSyncWithLdapIncludeAuth *bool - - Verify *bool - Encrypt *bool - SignRequest *bool - - IdpUrl *string - IdpDescriptorUrl *string - IdpMetadataUrl *string - ServiceProviderIdentifier *string - AssertionConsumerServiceURL *string - - SignatureAlgorithm *string - CanonicalAlgorithm *string - - ScopingIDPProviderId *string - ScopingIDPName *string - - IdpCertificateFile *string - PublicCertificateFile *string - PrivateKeyFile *string - - // User Mapping - IdAttribute *string - GuestAttribute *string - EnableAdminAttribute *bool - AdminAttribute *string - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - LocaleAttribute *string - PositionAttribute *string - - LoginButtonText *string - - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string -} - -func (s *SamlSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.EnableSyncWithLdap == nil { - s.EnableSyncWithLdap = NewBool(false) - } - - if s.EnableSyncWithLdapIncludeAuth == nil { - s.EnableSyncWithLdapIncludeAuth = NewBool(false) - } - - if s.EnableAdminAttribute == nil { - s.EnableAdminAttribute = NewBool(false) - } - - if s.Verify == nil { - s.Verify = NewBool(true) - } - - if s.Encrypt == nil { - s.Encrypt = NewBool(true) - } - - if s.SignRequest == nil { - s.SignRequest = NewBool(false) - } - - if s.SignatureAlgorithm == nil { - s.SignatureAlgorithm = NewString(SAML_SETTINGS_DEFAULT_SIGNATURE_ALGORITHM) - } - - if s.CanonicalAlgorithm == nil { - s.CanonicalAlgorithm = NewString(SAML_SETTINGS_DEFAULT_CANONICAL_ALGORITHM) - } - - if s.IdpUrl == nil { - s.IdpUrl = NewString("") - } - - if s.IdpDescriptorUrl == nil { - s.IdpDescriptorUrl = NewString("") - } - - if s.ServiceProviderIdentifier == nil { - if s.IdpDescriptorUrl != nil { - s.ServiceProviderIdentifier = NewString(*s.IdpDescriptorUrl) - } else { - s.ServiceProviderIdentifier = NewString("") - } - } - - if s.IdpMetadataUrl == nil { - s.IdpMetadataUrl = NewString("") - } - - if s.IdpCertificateFile == nil { - s.IdpCertificateFile = NewString("") - } - - if s.PublicCertificateFile == nil { - s.PublicCertificateFile = NewString("") - } - - if s.PrivateKeyFile == nil { - s.PrivateKeyFile = NewString("") - } - - if s.AssertionConsumerServiceURL == nil { - s.AssertionConsumerServiceURL = 
NewString("") - } - - if s.ScopingIDPProviderId == nil { - s.ScopingIDPProviderId = NewString("") - } - - if s.ScopingIDPName == nil { - s.ScopingIDPName = NewString("") - } - - if s.LoginButtonText == nil || *s.LoginButtonText == "" { - s.LoginButtonText = NewString(USER_AUTH_SERVICE_SAML_TEXT) - } - - if s.IdAttribute == nil { - s.IdAttribute = NewString(SAML_SETTINGS_DEFAULT_ID_ATTRIBUTE) - } - - if s.GuestAttribute == nil { - s.GuestAttribute = NewString(SAML_SETTINGS_DEFAULT_GUEST_ATTRIBUTE) - } - if s.AdminAttribute == nil { - s.AdminAttribute = NewString(SAML_SETTINGS_DEFAULT_ADMIN_ATTRIBUTE) - } - if s.FirstNameAttribute == nil { - s.FirstNameAttribute = NewString(SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) - } - - if s.LastNameAttribute == nil { - s.LastNameAttribute = NewString(SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) - } - - if s.EmailAttribute == nil { - s.EmailAttribute = NewString(SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) - } - - if s.UsernameAttribute == nil { - s.UsernameAttribute = NewString(SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) - } - - if s.NicknameAttribute == nil { - s.NicknameAttribute = NewString(SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) - } - - if s.PositionAttribute == nil { - s.PositionAttribute = NewString(SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) - } - - if s.LocaleAttribute == nil { - s.LocaleAttribute = NewString(SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE) - } - - if s.LoginButtonColor == nil { - s.LoginButtonColor = NewString("#34a28b") - } - - if s.LoginButtonBorderColor == nil { - s.LoginButtonBorderColor = NewString("#2389D7") - } - - if s.LoginButtonTextColor == nil { - s.LoginButtonTextColor = NewString("#ffffff") - } -} - -type NativeAppSettings struct { - AppDownloadLink *string `restricted:"true"` - AndroidAppDownloadLink *string `restricted:"true"` - IosAppDownloadLink *string `restricted:"true"` -} - -func (s *NativeAppSettings) SetDefaults() { - if s.AppDownloadLink == nil { - s.AppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK) - } - - if s.AndroidAppDownloadLink == nil { - s.AndroidAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK) - } - - if s.IosAppDownloadLink == nil { - s.IosAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK) - } -} - -type ElasticsearchSettings struct { - ConnectionUrl *string `restricted:"true"` - Username *string `restricted:"true"` - Password *string `restricted:"true"` - EnableIndexing *bool `restricted:"true"` - EnableSearching *bool `restricted:"true"` - EnableAutocomplete *bool `restricted:"true"` - Sniff *bool `restricted:"true"` - PostIndexReplicas *int `restricted:"true"` - PostIndexShards *int `restricted:"true"` - ChannelIndexReplicas *int `restricted:"true"` - ChannelIndexShards *int `restricted:"true"` - UserIndexReplicas *int `restricted:"true"` - UserIndexShards *int `restricted:"true"` - AggregatePostsAfterDays *int `restricted:"true"` - PostsAggregatorJobStartTime *string `restricted:"true"` - IndexPrefix *string `restricted:"true"` - LiveIndexingBatchSize *int `restricted:"true"` - BulkIndexingTimeWindowSeconds *int `restricted:"true"` - RequestTimeoutSeconds *int `restricted:"true"` - SkipTLSVerification *bool `restricted:"true"` - Trace *string `restricted:"true"` -} - -func (s *ElasticsearchSettings) SetDefaults() { - if s.ConnectionUrl == nil { - s.ConnectionUrl = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL) - } - - if s.Username == nil { - s.Username = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME) - } - - if 
s.Password == nil { - s.Password = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD) - } - - if s.EnableIndexing == nil { - s.EnableIndexing = NewBool(false) - } - - if s.EnableSearching == nil { - s.EnableSearching = NewBool(false) - } - - if s.EnableAutocomplete == nil { - s.EnableAutocomplete = NewBool(false) - } - - if s.Sniff == nil { - s.Sniff = NewBool(true) - } - - if s.PostIndexReplicas == nil { - s.PostIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS) - } - - if s.PostIndexShards == nil { - s.PostIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS) - } - - if s.ChannelIndexReplicas == nil { - s.ChannelIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_CHANNEL_INDEX_REPLICAS) - } - - if s.ChannelIndexShards == nil { - s.ChannelIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_CHANNEL_INDEX_SHARDS) - } - - if s.UserIndexReplicas == nil { - s.UserIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_USER_INDEX_REPLICAS) - } - - if s.UserIndexShards == nil { - s.UserIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_USER_INDEX_SHARDS) - } - - if s.AggregatePostsAfterDays == nil { - s.AggregatePostsAfterDays = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS) - } - - if s.PostsAggregatorJobStartTime == nil { - s.PostsAggregatorJobStartTime = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME) - } - - if s.IndexPrefix == nil { - s.IndexPrefix = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX) - } - - if s.LiveIndexingBatchSize == nil { - s.LiveIndexingBatchSize = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE) - } - - if s.BulkIndexingTimeWindowSeconds == nil { - s.BulkIndexingTimeWindowSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS) - } - - if s.RequestTimeoutSeconds == nil { - s.RequestTimeoutSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS) - } - - if s.SkipTLSVerification == nil { - s.SkipTLSVerification = NewBool(false) - } - - if s.Trace == nil { - s.Trace = NewString("") - } -} - -type BleveSettings struct { - IndexDir *string - EnableIndexing *bool - EnableSearching *bool - EnableAutocomplete *bool - BulkIndexingTimeWindowSeconds *int -} - -func (bs *BleveSettings) SetDefaults() { - if bs.IndexDir == nil { - bs.IndexDir = NewString(BLEVE_SETTINGS_DEFAULT_INDEX_DIR) - } - - if bs.EnableIndexing == nil { - bs.EnableIndexing = NewBool(false) - } - - if bs.EnableSearching == nil { - bs.EnableSearching = NewBool(false) - } - - if bs.EnableAutocomplete == nil { - bs.EnableAutocomplete = NewBool(false) - } - - if bs.BulkIndexingTimeWindowSeconds == nil { - bs.BulkIndexingTimeWindowSeconds = NewInt(BLEVE_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS) - } -} - -type DataRetentionSettings struct { - EnableMessageDeletion *bool - EnableFileDeletion *bool - MessageRetentionDays *int - FileRetentionDays *int - DeletionJobStartTime *string -} - -func (s *DataRetentionSettings) SetDefaults() { - if s.EnableMessageDeletion == nil { - s.EnableMessageDeletion = NewBool(false) - } - - if s.EnableFileDeletion == nil { - s.EnableFileDeletion = NewBool(false) - } - - if s.MessageRetentionDays == nil { - s.MessageRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS) - } - - if s.FileRetentionDays == nil { - s.FileRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS) - } - - if s.DeletionJobStartTime == nil { - s.DeletionJobStartTime = 
NewString(DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME) - } -} - -type JobSettings struct { - RunJobs *bool `restricted:"true"` - RunScheduler *bool `restricted:"true"` -} - -func (s *JobSettings) SetDefaults() { - if s.RunJobs == nil { - s.RunJobs = NewBool(true) - } - - if s.RunScheduler == nil { - s.RunScheduler = NewBool(true) - } -} - -type PluginState struct { - Enable bool -} - -type PluginSettings struct { - Enable *bool - EnableUploads *bool `restricted:"true"` - AllowInsecureDownloadUrl *bool `restricted:"true"` - EnableHealthCheck *bool `restricted:"true"` - Directory *string `restricted:"true"` - ClientDirectory *string `restricted:"true"` - Plugins map[string]map[string]interface{} - PluginStates map[string]*PluginState - EnableMarketplace *bool - EnableRemoteMarketplace *bool - AutomaticPrepackagedPlugins *bool - RequirePluginSignature *bool - MarketplaceUrl *string - SignaturePublicKeyFiles []string -} - -func (s *PluginSettings) SetDefaults(ls LogSettings) { - if s.Enable == nil { - s.Enable = NewBool(true) - } - - if s.EnableUploads == nil { - s.EnableUploads = NewBool(false) - } - - if s.AllowInsecureDownloadUrl == nil { - s.AllowInsecureDownloadUrl = NewBool(false) - } - - if s.EnableHealthCheck == nil { - s.EnableHealthCheck = NewBool(true) - } - - if s.Directory == nil || *s.Directory == "" { - s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY) - } - - if s.ClientDirectory == nil || *s.ClientDirectory == "" { - s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY) - } - - if s.Plugins == nil { - s.Plugins = make(map[string]map[string]interface{}) - } - - if s.PluginStates == nil { - s.PluginStates = make(map[string]*PluginState) - } - - if s.PluginStates["com.mattermost.nps"] == nil { - // Enable the NPS plugin by default if diagnostics are enabled - s.PluginStates["com.mattermost.nps"] = &PluginState{Enable: ls.EnableDiagnostics == nil || *ls.EnableDiagnostics} - } - - if s.EnableMarketplace == nil { - s.EnableMarketplace = NewBool(PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE) - } - - if s.EnableRemoteMarketplace == nil { - s.EnableRemoteMarketplace = NewBool(true) - } - - if s.AutomaticPrepackagedPlugins == nil { - s.AutomaticPrepackagedPlugins = NewBool(true) - } - - if s.MarketplaceUrl == nil || *s.MarketplaceUrl == "" || *s.MarketplaceUrl == PLUGIN_SETTINGS_OLD_MARKETPLACE_URL { - s.MarketplaceUrl = NewString(PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL) - } - - if s.RequirePluginSignature == nil { - s.RequirePluginSignature = NewBool(false) - } - - if s.SignaturePublicKeyFiles == nil { - s.SignaturePublicKeyFiles = []string{} - } -} - -type GlobalRelayMessageExportSettings struct { - CustomerType *string // must be either A9 or A10, dictates SMTP server url - SmtpUsername *string - SmtpPassword *string - EmailAddress *string // the address to send messages to - SMTPServerTimeout *int -} - -func (s *GlobalRelayMessageExportSettings) SetDefaults() { - if s.CustomerType == nil { - s.CustomerType = NewString(GLOBALRELAY_CUSTOMER_TYPE_A9) - } - if s.SmtpUsername == nil { - s.SmtpUsername = NewString("") - } - if s.SmtpPassword == nil { - s.SmtpPassword = NewString("") - } - if s.EmailAddress == nil { - s.EmailAddress = NewString("") - } - if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { - s.SMTPServerTimeout = NewInt(1800) - } -} - -type MessageExportSettings struct { - EnableExport *bool - ExportFormat *string - DailyRunTime *string - ExportFromTimestamp *int64 - BatchSize *int - - // formatter-specific settings - these are 
only expected to be non-nil if ExportFormat is set to the associated format - GlobalRelaySettings *GlobalRelayMessageExportSettings -} - -func (s *MessageExportSettings) SetDefaults() { - if s.EnableExport == nil { - s.EnableExport = NewBool(false) - } - - if s.ExportFormat == nil { - s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE) - } - - if s.DailyRunTime == nil { - s.DailyRunTime = NewString("01:00") - } - - if s.ExportFromTimestamp == nil { - s.ExportFromTimestamp = NewInt64(0) - } - - if s.BatchSize == nil { - s.BatchSize = NewInt(10000) - } - - if s.GlobalRelaySettings == nil { - s.GlobalRelaySettings = &GlobalRelayMessageExportSettings{} - } - s.GlobalRelaySettings.SetDefaults() -} - -type DisplaySettings struct { - CustomUrlSchemes []string - ExperimentalTimezone *bool -} - -func (s *DisplaySettings) SetDefaults() { - if s.CustomUrlSchemes == nil { - customUrlSchemes := []string{} - s.CustomUrlSchemes = customUrlSchemes - } - - if s.ExperimentalTimezone == nil { - s.ExperimentalTimezone = NewBool(false) - } -} - -type GuestAccountsSettings struct { - Enable *bool - AllowEmailAccounts *bool - EnforceMultifactorAuthentication *bool - RestrictCreationToDomains *string -} - -func (s *GuestAccountsSettings) SetDefaults() { - if s.Enable == nil { - s.Enable = NewBool(false) - } - - if s.AllowEmailAccounts == nil { - s.AllowEmailAccounts = NewBool(true) - } - - if s.EnforceMultifactorAuthentication == nil { - s.EnforceMultifactorAuthentication = NewBool(false) - } - - if s.RestrictCreationToDomains == nil { - s.RestrictCreationToDomains = NewString("") - } -} - -type ImageProxySettings struct { - Enable *bool - ImageProxyType *string - RemoteImageProxyURL *string - RemoteImageProxyOptions *string -} - -func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) { - if s.Enable == nil { - if ss.DEPRECATED_DO_NOT_USE_ImageProxyType == nil || *ss.DEPRECATED_DO_NOT_USE_ImageProxyType == "" { - s.Enable = NewBool(false) - } else { - s.Enable = NewBool(true) - } - } - - if s.ImageProxyType == nil { - if ss.DEPRECATED_DO_NOT_USE_ImageProxyType == nil || *ss.DEPRECATED_DO_NOT_USE_ImageProxyType == "" { - s.ImageProxyType = NewString(IMAGE_PROXY_TYPE_LOCAL) - } else { - s.ImageProxyType = ss.DEPRECATED_DO_NOT_USE_ImageProxyType - } - } - - if s.RemoteImageProxyURL == nil { - if ss.DEPRECATED_DO_NOT_USE_ImageProxyURL == nil { - s.RemoteImageProxyURL = NewString("") - } else { - s.RemoteImageProxyURL = ss.DEPRECATED_DO_NOT_USE_ImageProxyURL - } - } - - if s.RemoteImageProxyOptions == nil { - if ss.DEPRECATED_DO_NOT_USE_ImageProxyOptions == nil { - s.RemoteImageProxyOptions = NewString("") - } else { - s.RemoteImageProxyOptions = ss.DEPRECATED_DO_NOT_USE_ImageProxyOptions - } - } -} - -type ConfigFunc func() *Config - -type Config struct { - ServiceSettings ServiceSettings - TeamSettings TeamSettings - ClientRequirements ClientRequirements - SqlSettings SqlSettings - LogSettings LogSettings - ExperimentalAuditSettings ExperimentalAuditSettings - NotificationLogSettings NotificationLogSettings - PasswordSettings PasswordSettings - FileSettings FileSettings - EmailSettings EmailSettings - RateLimitSettings RateLimitSettings - PrivacySettings PrivacySettings - SupportSettings SupportSettings - AnnouncementSettings AnnouncementSettings - ThemeSettings ThemeSettings - GitLabSettings SSOSettings - GoogleSettings SSOSettings - Office365Settings Office365Settings - LdapSettings LdapSettings - ComplianceSettings ComplianceSettings - LocalizationSettings LocalizationSettings - SamlSettings 
SamlSettings - NativeAppSettings NativeAppSettings - ClusterSettings ClusterSettings - MetricsSettings MetricsSettings - ExperimentalSettings ExperimentalSettings - AnalyticsSettings AnalyticsSettings - ElasticsearchSettings ElasticsearchSettings - BleveSettings BleveSettings - DataRetentionSettings DataRetentionSettings - MessageExportSettings MessageExportSettings - JobSettings JobSettings - PluginSettings PluginSettings - DisplaySettings DisplaySettings - GuestAccountsSettings GuestAccountsSettings - ImageProxySettings ImageProxySettings -} - -func (o *Config) Clone() *Config { - var ret Config - if err := json.Unmarshal([]byte(o.ToJson()), &ret); err != nil { - panic(err) - } - return &ret -} - -func (o *Config) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func (o *Config) GetSSOService(service string) *SSOSettings { - switch service { - case SERVICE_GITLAB: - return &o.GitLabSettings - case SERVICE_GOOGLE: - return &o.GoogleSettings - case SERVICE_OFFICE365: - return o.Office365Settings.SSOSettings() - } - - return nil -} - -func ConfigFromJson(data io.Reader) *Config { - var o *Config - json.NewDecoder(data).Decode(&o) - return o -} - -// isUpdate detects a pre-existing config based on whether SiteURL has been changed -func (o *Config) isUpdate() bool { - return o.ServiceSettings.SiteURL != nil -} - -func (o *Config) SetDefaults() { - isUpdate := o.isUpdate() - - o.LdapSettings.SetDefaults() - o.SamlSettings.SetDefaults() - - if o.TeamSettings.TeammateNameDisplay == nil { - o.TeamSettings.TeammateNameDisplay = NewString(SHOW_USERNAME) - - if *o.SamlSettings.Enable || *o.LdapSettings.Enable { - *o.TeamSettings.TeammateNameDisplay = SHOW_FULLNAME - } - } - - o.SqlSettings.SetDefaults(isUpdate) - o.FileSettings.SetDefaults(isUpdate) - o.EmailSettings.SetDefaults(isUpdate) - o.PrivacySettings.setDefaults() - o.Office365Settings.setDefaults() - o.GitLabSettings.setDefaults("", "", "", "") - o.GoogleSettings.setDefaults(GOOGLE_SETTINGS_DEFAULT_SCOPE, GOOGLE_SETTINGS_DEFAULT_AUTH_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_TOKEN_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_USER_API_ENDPOINT) - o.ServiceSettings.SetDefaults(isUpdate) - o.PasswordSettings.SetDefaults() - o.TeamSettings.SetDefaults() - o.MetricsSettings.SetDefaults() - o.ExperimentalSettings.SetDefaults() - o.SupportSettings.SetDefaults() - o.AnnouncementSettings.SetDefaults() - o.ThemeSettings.SetDefaults() - o.ClusterSettings.SetDefaults() - o.PluginSettings.SetDefaults(o.LogSettings) - o.AnalyticsSettings.SetDefaults() - o.ComplianceSettings.SetDefaults() - o.LocalizationSettings.SetDefaults() - o.ElasticsearchSettings.SetDefaults() - o.BleveSettings.SetDefaults() - o.NativeAppSettings.SetDefaults() - o.DataRetentionSettings.SetDefaults() - o.RateLimitSettings.SetDefaults() - o.LogSettings.SetDefaults() - o.ExperimentalAuditSettings.SetDefaults() - o.NotificationLogSettings.SetDefaults() - o.JobSettings.SetDefaults() - o.MessageExportSettings.SetDefaults() - o.DisplaySettings.SetDefaults() - o.GuestAccountsSettings.SetDefaults() - o.ImageProxySettings.SetDefaults(o.ServiceSettings) -} - -func (o *Config) IsValid() *AppError { - if len(*o.ServiceSettings.SiteURL) == 0 && *o.EmailSettings.EnableEmailBatching { - return NewAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "", http.StatusBadRequest) - } - - if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching { - return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", 
http.StatusBadRequest) - } - - if len(*o.ServiceSettings.SiteURL) == 0 && *o.ServiceSettings.AllowCookiesForSubdomains { - return NewAppError("Config.IsValid", "model.config.is_valid.allow_cookies_for_subdomains.app_error", nil, "", http.StatusBadRequest) - } - - if err := o.TeamSettings.isValid(); err != nil { - return err - } - - if err := o.SqlSettings.isValid(); err != nil { - return err - } - - if err := o.FileSettings.isValid(); err != nil { - return err - } - - if err := o.EmailSettings.isValid(); err != nil { - return err - } - - if err := o.LdapSettings.isValid(); err != nil { - return err - } - - if err := o.SamlSettings.isValid(); err != nil { - return err - } - - if *o.PasswordSettings.MinimumLength < PASSWORD_MINIMUM_LENGTH || *o.PasswordSettings.MinimumLength > PASSWORD_MAXIMUM_LENGTH { - return NewAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PASSWORD_MINIMUM_LENGTH, "MaxLength": PASSWORD_MAXIMUM_LENGTH}, "", http.StatusBadRequest) - } - - if err := o.RateLimitSettings.isValid(); err != nil { - return err - } - - if err := o.ServiceSettings.isValid(); err != nil { - return err - } - - if err := o.ElasticsearchSettings.isValid(); err != nil { - return err - } - - if err := o.BleveSettings.isValid(); err != nil { - return err - } - - if err := o.DataRetentionSettings.isValid(); err != nil { - return err - } - - if err := o.LocalizationSettings.isValid(); err != nil { - return err - } - - if err := o.MessageExportSettings.isValid(o.FileSettings); err != nil { - return err - } - - if err := o.DisplaySettings.isValid(); err != nil { - return err - } - - if err := o.ImageProxySettings.isValid(); err != nil { - return err - } - return nil -} - -func (s *TeamSettings) isValid() *AppError { - if *s.MaxUsersPerTeam <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaxChannelsPerTeam <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaxNotificationsPerChannel <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *s.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { - return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.TeammateNameDisplay == SHOW_FULLNAME || *s.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *s.TeammateNameDisplay == SHOW_USERNAME) { - return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.SiteName) > SITENAME_MAX_LENGTH { - return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "", http.StatusBadRequest) - } - - return nil -} - -func (s *SqlSettings) isValid() *AppError { - if *s.AtRestEncryptKey != "" && len(*s.AtRestEncryptKey) < 32 { - return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.DriverName == DATABASE_DRIVER_MYSQL || *s.DriverName == DATABASE_DRIVER_POSTGRES) { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaxIdleConns <= 0 { - return 
NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest) - } - - if *s.ConnMaxLifetimeMilliseconds < 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_conn_max_lifetime_milliseconds.app_error", nil, "", http.StatusBadRequest) - } - - if *s.QueryTimeout <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.DataSource) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaxOpenConns <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *FileSettings) isValid() *AppError { - if *s.MaxFileSize <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.DriverName == IMAGE_DRIVER_LOCAL || *s.DriverName == IMAGE_DRIVER_S3) { - return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest) - } - - if *s.PublicLinkSalt != "" && len(*s.PublicLinkSalt) < 32 { - return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest) - } - - if *s.Directory == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.directory.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *EmailSettings) isValid() *AppError { - if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == CONN_SECURITY_TLS || *s.ConnectionSecurity == CONN_SECURITY_STARTTLS || *s.ConnectionSecurity == CONN_SECURITY_PLAIN) { - return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest) - } - - if *s.EmailBatchingBufferSize <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest) - } - - if *s.EmailBatchingInterval < 30 { - return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *s.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { - return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *RateLimitSettings) isValid() *AppError { - if *s.MemoryStoreSize <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest) - } - - if *s.PerSec <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaxBurst <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *LdapSettings) isValid() *AppError { - if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == CONN_SECURITY_TLS || *s.ConnectionSecurity == CONN_SECURITY_STARTTLS) { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest) - } - - if *s.SyncIntervalMinutes <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", 
http.StatusBadRequest) - } - - if *s.MaxPageSize < 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest) - } - - if *s.Enable { - if *s.LdapServer == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest) - } - - if *s.BaseDN == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", http.StatusBadRequest) - } - - if *s.EmailAttribute == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest) - } - - if *s.UsernameAttribute == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest) - } - - if *s.IdAttribute == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest) - } - - if *s.LoginIdAttribute == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.ldap_login_id", nil, "", http.StatusBadRequest) - } - - if *s.UserFilter != "" { - if _, err := ldap.CompileFilter(*s.UserFilter); err != nil { - return NewAppError("ValidateFilter", "ent.ldap.validate_filter.app_error", nil, err.Error(), http.StatusBadRequest) - } - } - - if *s.GuestFilter != "" { - if _, err := ldap.CompileFilter(*s.GuestFilter); err != nil { - return NewAppError("LdapSettings.isValid", "ent.ldap.validate_guest_filter.app_error", nil, err.Error(), http.StatusBadRequest) - } - } - - if *s.AdminFilter != "" { - if _, err := ldap.CompileFilter(*s.AdminFilter); err != nil { - return NewAppError("LdapSettings.isValid", "ent.ldap.validate_admin_filter.app_error", nil, err.Error(), http.StatusBadRequest) - } - } - } - - return nil -} - -func (s *SamlSettings) isValid() *AppError { - if *s.Enable { - if len(*s.IdpUrl) == 0 || !IsValidHttpUrl(*s.IdpUrl) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*s.IdpDescriptorUrl) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.IdpCertificateFile) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.EmailAttribute) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.UsernameAttribute) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.ServiceProviderIdentifier) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_spidentifier_attribute.app_error", nil, "", http.StatusBadRequest) - } - - if *s.Verify { - if len(*s.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*s.AssertionConsumerServiceURL) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest) - } - } - - if *s.Encrypt { - if len(*s.PrivateKeyFile) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.PublicCertificateFile) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest) - } - } - - if 
len(*s.EmailAttribute) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) - } - - if !(*s.SignatureAlgorithm == SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 || *s.SignatureAlgorithm == SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA256 || *s.SignatureAlgorithm == SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA512) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_signature_algorithm.app_error", nil, "", http.StatusBadRequest) - } - if !(*s.CanonicalAlgorithm == SAML_SETTINGS_CANONICAL_ALGORITHM_C14N || *s.CanonicalAlgorithm == SAML_SETTINGS_CANONICAL_ALGORITHM_C14N11) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_canonical_algorithm.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.GuestAttribute) > 0 { - if !(strings.Contains(*s.GuestAttribute, "=")) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) - } - if len(strings.Split(*s.GuestAttribute, "=")) != 2 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) - } - } - - if len(*s.AdminAttribute) > 0 { - if !(strings.Contains(*s.AdminAttribute, "=")) { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) - } - if len(strings.Split(*s.AdminAttribute, "=")) != 2 { - return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) - } - } - } - - return nil -} - -func (s *ServiceSettings) isValid() *AppError { - if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == CONN_SECURITY_TLS) { - return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest) - } - - if *s.ConnectionSecurity == CONN_SECURITY_TLS && !*s.UseLetsEncrypt { - appErr := NewAppError("Config.IsValid", "model.config.is_valid.tls_cert_file.app_error", nil, "", http.StatusBadRequest) - - if *s.TLSCertFile == "" { - return appErr - } else if _, err := os.Stat(*s.TLSCertFile); os.IsNotExist(err) { - return appErr - } - - appErr = NewAppError("Config.IsValid", "model.config.is_valid.tls_key_file.app_error", nil, "", http.StatusBadRequest) - - if *s.TLSKeyFile == "" { - return appErr - } else if _, err := os.Stat(*s.TLSKeyFile); os.IsNotExist(err) { - return appErr - } - } - - if len(s.TLSOverwriteCiphers) > 0 { - for _, cipher := range s.TLSOverwriteCiphers { - if _, ok := ServerTLSSupportedCiphers[cipher]; !ok { - return NewAppError("Config.IsValid", "model.config.is_valid.tls_overwrite_cipher.app_error", map[string]interface{}{"name": cipher}, "", http.StatusBadRequest) - } - } - } - - if *s.ReadTimeout <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest) - } - - if *s.WriteTimeout <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest) - } - - if *s.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { - return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest) - } - - if *s.MaximumLoginAttempts <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest) - } - - if len(*s.SiteURL) != 0 { - if _, err := 
url.ParseRequestURI(*s.SiteURL); err != nil { - return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest) - } - } - - if len(*s.WebsocketURL) != 0 { - if _, err := url.ParseRequestURI(*s.WebsocketURL); err != nil { - return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, "", http.StatusBadRequest) - } - } - - host, port, _ := net.SplitHostPort(*s.ListenAddress) - var isValidHost bool - if host == "" { - isValidHost = true - } else { - isValidHost = (net.ParseIP(host) != nil) || IsDomainName(host) - } - portInt, err := strconv.Atoi(port) - if err != nil || !isValidHost || portInt < 0 || portInt > math.MaxUint16 { - return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest) - } - - if *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED && - *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_ON && - *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_OFF { - return NewAppError("Config.IsValid", "model.config.is_valid.group_unread_channels.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *ElasticsearchSettings) isValid() *AppError { - if *s.EnableIndexing { - if len(*s.ConnectionUrl) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest) - } - } - - if *s.EnableSearching && !*s.EnableIndexing { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest) - } - - if *s.EnableAutocomplete && !*s.EnableIndexing { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) - } - - if *s.AggregatePostsAfterDays < 1 { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest) - } - - if _, err := time.Parse("15:04", *s.PostsAggregatorJobStartTime); err != nil { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) - } - - if *s.LiveIndexingBatchSize < 1 { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest) - } - - if *s.BulkIndexingTimeWindowSeconds < 1 { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) - } - - if *s.RequestTimeoutSeconds < 1 { - return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (bs *BleveSettings) isValid() *AppError { - if *bs.EnableIndexing { - if len(*bs.IndexDir) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.filename.app_error", nil, "", http.StatusBadRequest) - } - } else { - if *bs.EnableSearching { - return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_searching.app_error", nil, "", http.StatusBadRequest) - } - if *bs.EnableAutocomplete { - return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) - } - } - if *bs.BulkIndexingTimeWindowSeconds < 1 { - return 
NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (s *DataRetentionSettings) isValid() *AppError { - if *s.MessageRetentionDays <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) - } - - if *s.FileRetentionDays <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) - } - - if _, err := time.Parse("15:04", *s.DeletionJobStartTime); err != nil { - return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) - } - - return nil -} - -func (s *LocalizationSettings) isValid() *AppError { - if len(*s.AvailableLocales) > 0 { - if !strings.Contains(*s.AvailableLocales, *s.DefaultClientLocale) { - return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest) - } - } - - return nil -} - -func (s *MessageExportSettings) isValid(fs FileSettings) *AppError { - if s.EnableExport == nil { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest) - } - if *s.EnableExport { - if s.ExportFromTimestamp == nil || *s.ExportFromTimestamp < 0 || *s.ExportFromTimestamp > GetMillis() { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, "", http.StatusBadRequest) - } else if s.DailyRunTime == nil { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest) - } else if _, err := time.Parse("15:04", *s.DailyRunTime); err != nil { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest) - } else if s.BatchSize == nil || *s.BatchSize < 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest) - } else if s.ExportFormat == nil || (*s.ExportFormat != COMPLIANCE_EXPORT_TYPE_ACTIANCE && *s.ExportFormat != COMPLIANCE_EXPORT_TYPE_GLOBALRELAY && *s.ExportFormat != COMPLIANCE_EXPORT_TYPE_CSV) { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_type.app_error", nil, "", http.StatusBadRequest) - } - - if *s.ExportFormat == COMPLIANCE_EXPORT_TYPE_GLOBALRELAY { - if s.GlobalRelaySettings == nil { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.config_missing.app_error", nil, "", http.StatusBadRequest) - } else if s.GlobalRelaySettings.CustomerType == nil || (*s.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A9 && *s.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A10) { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.customer_type.app_error", nil, "", http.StatusBadRequest) - } else if s.GlobalRelaySettings.EmailAddress == nil || !strings.Contains(*s.GlobalRelaySettings.EmailAddress, "@") { - // validating email addresses is hard - just make sure it contains an '@' sign - // see https://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address - return NewAppError("Config.IsValid", 
"model.config.is_valid.message_export.global_relay.email_address.app_error", nil, "", http.StatusBadRequest) - } else if s.GlobalRelaySettings.SmtpUsername == nil || *s.GlobalRelaySettings.SmtpUsername == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_username.app_error", nil, "", http.StatusBadRequest) - } else if s.GlobalRelaySettings.SmtpPassword == nil || *s.GlobalRelaySettings.SmtpPassword == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_password.app_error", nil, "", http.StatusBadRequest) - } - } - } - return nil -} - -func (s *DisplaySettings) isValid() *AppError { - if len(s.CustomUrlSchemes) != 0 { - validProtocolPattern := regexp.MustCompile(`(?i)^\s*[A-Za-z][A-Za-z0-9.+-]*\s*$`) - - for _, scheme := range s.CustomUrlSchemes { - if !validProtocolPattern.MatchString(scheme) { - return NewAppError( - "Config.IsValid", - "model.config.is_valid.display.custom_url_schemes.app_error", - map[string]interface{}{"Scheme": scheme}, - "", - http.StatusBadRequest, - ) - } - } - } - - return nil -} - -func (s *ImageProxySettings) isValid() *AppError { - if *s.Enable { - switch *s.ImageProxyType { - case IMAGE_PROXY_TYPE_LOCAL: - // No other settings to validate - case IMAGE_PROXY_TYPE_ATMOS_CAMO: - if *s.RemoteImageProxyURL == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_url.app_error", nil, "", http.StatusBadRequest) - } - - if *s.RemoteImageProxyOptions == "" { - return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_options.app_error", nil, "", http.StatusBadRequest) - } - default: - return NewAppError("Config.IsValid", "model.config.is_valid.image_proxy_type.app_error", nil, "", http.StatusBadRequest) - } - } - - return nil -} - -func (o *Config) GetSanitizeOptions() map[string]bool { - options := map[string]bool{} - options["fullname"] = *o.PrivacySettings.ShowFullName - options["email"] = *o.PrivacySettings.ShowEmailAddress - - return options -} - -func (o *Config) Sanitize() { - if o.LdapSettings.BindPassword != nil && len(*o.LdapSettings.BindPassword) > 0 { - *o.LdapSettings.BindPassword = FAKE_SETTING - } - - *o.FileSettings.PublicLinkSalt = FAKE_SETTING - - if len(*o.FileSettings.AmazonS3SecretAccessKey) > 0 { - *o.FileSettings.AmazonS3SecretAccessKey = FAKE_SETTING - } - - if o.EmailSettings.SMTPPassword != nil && len(*o.EmailSettings.SMTPPassword) > 0 { - *o.EmailSettings.SMTPPassword = FAKE_SETTING - } - - if len(*o.GitLabSettings.Secret) > 0 { - *o.GitLabSettings.Secret = FAKE_SETTING - } - - *o.SqlSettings.DataSource = FAKE_SETTING - *o.SqlSettings.AtRestEncryptKey = FAKE_SETTING - - *o.ElasticsearchSettings.Password = FAKE_SETTING - - for i := range o.SqlSettings.DataSourceReplicas { - o.SqlSettings.DataSourceReplicas[i] = FAKE_SETTING - } - - for i := range o.SqlSettings.DataSourceSearchReplicas { - o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go b/vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go deleted file mode 100644 index a39ff911..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" -) - -type DataRetentionPolicy struct { - MessageDeletionEnabled bool `json:"message_deletion_enabled"` - FileDeletionEnabled bool `json:"file_deletion_enabled"` - MessageRetentionCutoff int64 `json:"message_retention_cutoff"` - FileRetentionCutoff int64 `json:"file_retention_cutoff"` -} - -func (me *DataRetentionPolicy) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func DataRetentionPolicyFromJson(data io.Reader) *DataRetentionPolicy { - var me *DataRetentionPolicy - json.NewDecoder(data).Decode(&me) - return me -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go deleted file mode 100644 index 807f6abb..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -var SystemEmojis = map[string]string{"policewoman": "1f46e-200d-2640-fe0f", "family_man_girl_medium_skin_tone": "1f468-1f3fd", "man_technologist": "1f468-200d-1f4bb", "family_woman_girl_medium_light_skin_tone": "1f469-1f3fc", "massage_woman_medium_light_skin_tone": "1f486-1f3fc-200d-2640-fe0f", "family_woman_woman_boy": "1f469-200d-1f469-200d-1f466", "rice_scene": "1f391", "notes": "1f3b6", "burundi": "1f1e7-1f1ee", "woman_medium_skin_tone": "1f469-1f3fd", "tipping_hand_man_medium_dark_skin_tone": "1f481-1f3fe-200d-2642-fe0f", "new_moon": "1f311", "belize": "1f1e7-1f1ff", "bhutan": "1f1e7-1f1f9", "eu": "1f1ea-1f1fa", "point_up_dark_skin_tone": "261d-1f3ff", "older_man_medium_light_skin_tone": "1f474-1f3fc", "prince": "1f934", "walking_man": "1f6b6", "telephone_receiver": "1f4de", "arrow_upper_right": "2197-fe0f", "taiwan": "1f1f9-1f1fc", "-1_light_skin_tone": "1f44e-1f3fb", "bear": "1f43b", "derelict_house": "1f3da", "blue_book": "1f4d8", "ok": "1f197", "woman_farmer_medium_light_skin_tone": "1f469-1f3fc", "man_shrugging_light_skin_tone": "1f937-1f3fb-200d-2642-fe0f", "dancing_women": "1f46f", "cd": "1f4bf", "tada": "1f389", "virgo": "264d-fe0f", "white_flower": "1f4ae", "guardswoman_medium_dark_skin_tone": "1f482-1f3fe-200d-2640-fe0f", "performing_arts": "1f3ad", "prayer_beads": "1f4ff", "congo_brazzaville": "1f1e8-1f1ec", "point_down_medium_skin_tone": "1f447-1f3fd", "raised_hand_with_fingers_splayed_light_skin_tone": "1f590-1f3fb", "man_playing_water_polo_medium_skin_tone": "1f93d-1f3fd-200d-2642-fe0f", "four_leaf_clover": "1f340", "microphone": "1f3a4", "heartpulse": "1f497", "north_korea": "1f1f0-1f1f5", "neutral_face": "1f610", "volleyball": "1f3d0", "man_playing_water_polo": "1f93d-200d-2642-fe0f", "uk": "1f1ec-1f1e7", "wallis_futuna": "1f1fc-1f1eb", "earth_africa": "1f30d", "droplet": "1f4a7", "construction_worker_man_medium_dark_skin_tone": "1f477-1f3fe-200d-2640-fe0f", "family_woman_woman_girl_boy_medium_light_skin_tone": "1f469-1f3fc", "mountain_biking_man_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2640-fe0f", "vulcan_salute_light_skin_tone": "1f596-1f3fb", "woman_shrugging_dark_skin_tone": "1f937-1f3ff-200d-2640-fe0f", "walking_man_medium_light_skin_tone": "1f6b6-1f3fc-200d-2640-fe0f", "wave": "1f44b", "framed_picture": "1f5bc", "mag": "1f50d", "fist_left": "1f91b", "building_construction": "1f3d7", "clock9": "1f558", "cayman_islands": "1f1f0-1f1fe", "laos": "1f1f1-1f1e6", "woman_playing_handball_dark_skin_tone": "1f93e-1f3ff-200d-2640-fe0f", 
"man_office_worker": "1f468-200d-1f4bc", "family_man_woman_girl": "1f468-200d-1f469-200d-1f467", "wilted_flower": "1f940", "books": "1f4da", "rage": "1f621", "rice_ball": "1f359", "desert": "1f3dc", "malta": "1f1f2-1f1f9", "haircut_woman_dark_skin_tone": "1f487-1f3ff-200d-2640-fe0f", "symbols": "1f523", "marshall_islands": "1f1f2-1f1ed", "sierra_leone": "1f1f8-1f1f1", "crossed_fingers_medium_dark_skin_tone": "1f91e-1f3fe", "man_judge_medium_skin_tone": "1f468-1f3fd", "bamboo": "1f38d", "keyboard": "2328-fe0f", "clock10": "1f559", "massage_man_medium_skin_tone": "1f486-1f3fd-200d-2642-fe0f", "tipping_hand_man_dark_skin_tone": "1f481-1f3ff-200d-2642-fe0f", "man_facepalming_light_skin_tone": "1f926-1f3fb-200d-2642-fe0f", "train": "1f68b", "traffic_light": "1f6a5", "vietnam": "1f1fb-1f1f3", "boy_medium_light_skin_tone": "1f466-1f3fc", "man_farmer_light_skin_tone": "1f468-1f3fb", "man_singer_medium_dark_skin_tone": "1f468-1f3fe", "woman_cartwheeling_medium_light_skin_tone": "1f938-1f3fc-200d-2640-fe0f", "top": "1f51d", "gb": "1f1ec-1f1e7", "mouse2": "1f401", "do_not_litter": "1f6af", "south_sudan": "1f1f8-1f1f8", "bowing_woman_light_skin_tone": "1f647-1f3fb-200d-2640-fe0f", "family_man_man_girl_girl_medium_light_skin_tone": "1f468-1f3fc", "japanese_goblin": "1f47a", "camel": "1f42b", "taurus": "2649-fe0f", "mute": "1f507", "woman_mechanic_medium_dark_skin_tone": "1f469-1f3fe", "surfer": "1f3c4", "tipping_hand_man": "1f481-200d-2642-fe0f", "family_woman_woman_boy_boy": "1f469-200d-1f469-200d-1f466-200d-1f466", "floppy_disk": "1f4be", "atm": "1f3e7", "clock230": "1f55d", "prince_light_skin_tone": "1f934-1f3fb", "name_badge": "1f4db", "octocat": "octocat", "family_woman_woman_girl_girl_dark_skin_tone": "1f469-1f3ff", "christmas_tree": "1f384", "waxing_gibbous_moon": "1f314", "mountain_cableway": "1f6a0", "woman_scientist_medium_dark_skin_tone": "1f469-1f3fe", "haircut_man_medium_dark_skin_tone": "1f487-1f3fe-200d-2642-fe0f", "basketball_woman_medium_dark_skin_tone": "26f9-1f3fe-200d-2640-fe0f", "family_man_man_boy_medium_light_skin_tone": "1f468-1f3fc", "rowing_woman_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2640-fe0f", "bowling": "1f3b3", "shinto_shrine": "26e9", "round_pushpin": "1f4cd", "cyprus": "1f1e8-1f1fe", "open_hands_dark_skin_tone": "1f450-1f3ff", "clap_medium_skin_tone": "1f44f-1f3fd", "bath_medium_dark_skin_tone": "1f6c0-1f3fe", "briefcase": "1f4bc", "tiger": "1f42f", "morocco": "1f1f2-1f1e6", "open_hands_light_skin_tone": "1f450-1f3fb", "hand_light_skin_tone": "270b-1f3fb", "weight_lifting_man_medium_dark_skin_tone": "1f3cb-1f3fe-200d-2640-fe0f", "mans_shoe": "1f45e", "poland": "1f1f5-1f1f1", "raised_hands_medium_skin_tone": "1f64c-1f3fd", "family_man_woman_girl_boy_light_skin_tone": "1f468-1f3fb", "woman_playing_handball_medium_skin_tone": "1f93e-1f3fd-200d-2640-fe0f", "office": "1f3e2", "woman_singer_medium_dark_skin_tone": "1f469-1f3fe", "family_woman_woman_boy_boy_medium_light_skin_tone": "1f469-1f3fc", "scorpion": "1f982", "tomato": "1f345", "goal_net": "1f945", "chad": "1f1f9-1f1e9", "family_man_woman_girl_boy_medium_skin_tone": "1f468-1f3fd", "mountain_biking_man_light_skin_tone": "1f6b5-1f3fb-200d-2640-fe0f", "weight_lifting_man_dark_skin_tone": "1f3cb-1f3ff-200d-2640-fe0f", "eyeglasses": "1f453", "golfing_woman": "1f3cc-fe0f-200d-2640-fe0f", "dvd": "1f4c0", "clipboard": "1f4cb", "ireland": "1f1ee-1f1ea", "woman_student_dark_skin_tone": "1f469-1f3ff", "angry": "1f620", "baby": "1f476", "women_wrestling": "1f93c-200d-2640-fe0f", "black_square_button": "1f532", 
"male_detective_medium_light_skin_tone": "1f575-1f3fc-200d-2640-fe0f", "dancer_dark_skin_tone": "1f483-1f3ff", "id": "1f194", "vibration_mode": "1f4f3", "handshake": "1f91d", "tiger2": "1f405", "leaves": "1f343", "baseball": "26be-fe0f", "golf": "26f3-fe0f", "toilet": "1f6bd", "male_detective_dark_skin_tone": "1f575-1f3ff-200d-2640-fe0f", "family_woman_boy": "1f469-200d-1f466", "duck": "1f986", "writing_hand_medium_dark_skin_tone": "270d-1f3fe", "woman_singer_medium_light_skin_tone": "1f469-1f3fc", "man_teacher_medium_skin_tone": "1f468-1f3fd", "lips": "1f444", "octopus": "1f419", "policeman_medium_dark_skin_tone": "1f46e-1f3fe-200d-2640-fe0f", "man_factory_worker_dark_skin_tone": "1f468-1f3ff", "man_astronaut_light_skin_tone": "1f468-1f3fb", "ok_man_dark_skin_tone": "1f646-1f3ff-200d-2642-fe0f", "couple_with_heart": "1f491", "pray_medium_skin_tone": "1f64f-1f3fd", "woman_health_worker_light_skin_tone": "1f469-1f3fb", "tipping_hand_woman_medium_light_skin_tone": "1f481-1f3fc-200d-2640-fe0f", "no_good_woman_dark_skin_tone": "1f645-1f3ff-200d-2640-fe0f", "no_good_woman_medium_dark_skin_tone": "1f645-1f3fe-200d-2640-fe0f", "thumbsdown": "1f44e", "fist": "270a", "camera_flash": "1f4f8", "azerbaijan": "1f1e6-1f1ff", "woman_with_turban_medium_light_skin_tone": "1f473-1f3fc-200d-2640-fe0f", "man_singer_medium_skin_tone": "1f468-1f3fd", "mali": "1f1f2-1f1f1", "blonde_woman_medium_dark_skin_tone": "1f471-1f3fe-200d-2640-fe0f", "family_man_man_girl_light_skin_tone": "1f468-1f3fb", "biking_woman_medium_skin_tone": "1f6b4-1f3fd-200d-2640-fe0f", "crab": "1f980", "green_salad": "1f957", "men_wrestling": "1f93c-200d-2642-fe0f", "-1_medium_dark_skin_tone": "1f44e-1f3fe", "baby_dark_skin_tone": "1f476-1f3ff", "surfing_woman_light_skin_tone": "1f3c4-1f3fb-200d-2640-fe0f", "stuck_out_tongue_winking_eye": "1f61c", "plate_with_cutlery": "1f37d", "swimmer": "1f3ca", "blonde_woman_medium_light_skin_tone": "1f471-1f3fc-200d-2640-fe0f", "curly_loop": "27b0", "india": "1f1ee-1f1f3", "norfolk_island": "1f1f3-1f1eb", "mountain_biking_woman_dark_skin_tone": "1f6b5-1f3ff-200d-2640-fe0f", "ghost": "1f47b", "boar": "1f417", "railway_track": "1f6e4", "100": "1f4af", "metal_medium_dark_skin_tone": "1f918-1f3fe", "woman_playing_handball_light_skin_tone": "1f93e-1f3fb-200d-2640-fe0f", "barber": "1f488", "clock730": "1f562", "equatorial_guinea": "1f1ec-1f1f6", "maldives": "1f1f2-1f1fb", "weight_lifting_woman_medium_dark_skin_tone": "1f3cb-1f3fe-200d-2640-fe0f", "eagle": "1f985", "tea": "1f375", "tanabata_tree": "1f38b", "night_with_stars": "1f303", "balloon": "1f388", "on": "1f51b", "lizard": "1f98e", "beer": "1f37a", "part_alternation_mark": "303d-fe0f", "white_square_button": "1f533", "clock430": "1f55f", "gibraltar": "1f1ec-1f1ee", "massage_woman_medium_skin_tone": "1f486-1f3fd-200d-2640-fe0f", "sweat": "1f613", "athletic_shoe": "1f45f", "joystick": "1f579", "biohazard": "2623-fe0f", "muscle_medium_light_skin_tone": "1f4aa-1f3fc", "bride_with_veil_medium_light_skin_tone": "1f470-1f3fc", "parasol_on_ground": "26f1", "costa_rica": "1f1e8-1f1f7", "woman_student_light_skin_tone": "1f469-1f3fb", "massage_woman_medium_dark_skin_tone": "1f486-1f3fe-200d-2640-fe0f", "surfing_woman_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2640-fe0f", "rowing_woman": "1f6a3-200d-2640-fe0f", "guardsman_light_skin_tone": "1f482-1f3fb-200d-2640-fe0f", "construction_worker_man_light_skin_tone": "1f477-1f3fb-200d-2640-fe0f", "family_woman_boy_boy": "1f469-200d-1f466-200d-1f466", "small_airplane": "1f6e9", "baggage_claim": "1f6c4", "bosnia_herzegovina": 
"1f1e7-1f1e6", "falkland_islands": "1f1eb-1f1f0", "crossed_fingers_medium_light_skin_tone": "1f91e-1f3fc", "man_scientist_medium_light_skin_tone": "1f468-1f3fc", "family_woman_girl_boy_medium_skin_tone": "1f469-1f3fd", "satisfied": "1f606", "u5408": "1f234", "cn": "1f1e8-1f1f3", "isle_of_man": "1f1ee-1f1f2", "fist_raised_medium_light_skin_tone": "270a-1f3fc", "family_man_woman_girl_dark_skin_tone": "1f468-1f3ff", "family_woman_girl_dark_skin_tone": "1f469-1f3ff", "family_man_boy_medium_skin_tone": "1f468-1f3fd", "money_mouth_face": "1f911", "syringe": "1f489", "hand_medium_light_skin_tone": "270b-1f3fc", "writing_hand_medium_skin_tone": "270d-1f3fd", "man_farmer_medium_light_skin_tone": "1f468-1f3fc", "woman_artist_dark_skin_tone": "1f469-1f3ff", "tickets": "1f39f", "man_cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2642-fe0f", "squid": "1f991", "fish": "1f41f", "memo": "1f4dd", "eye_speech_bubble": "1f441-200d-1f5e8", "+1_light_skin_tone": "1f44d-1f3fb", "tulip": "1f337", "blossom": "1f33c", "family_woman_woman_girl_medium_dark_skin_tone": "1f469-1f3fe", "triumph": "1f624", "rooster": "1f413", "ng": "1f196", "blonde_man_medium_light_skin_tone": "1f471-1f3fc-200d-2640-fe0f", "policeman_light_skin_tone": "1f46e-1f3fb-200d-2640-fe0f", "woman_cook_dark_skin_tone": "1f469-1f3ff", "pray_dark_skin_tone": "1f64f-1f3ff", "point_up_2_medium_skin_tone": "1f446-1f3fd", "busts_in_silhouette": "1f465", "tornado": "1f32a", "woman_juggling": "1f939-200d-2640-fe0f", "cupid": "1f498", "white_check_mark": "2705", "aruba": "1f1e6-1f1fc", "family_man_boy_boy_medium_dark_skin_tone": "1f468-1f3fe", "woman_cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2640-fe0f", "woman_playing_water_polo_dark_skin_tone": "1f93d-1f3ff-200d-2640-fe0f", "relaxed": "263a-fe0f", "birthday": "1f382", "high_brightness": "1f506", "couple_with_heart_woman_woman_medium_light_skin_tone": "1f469-1f3fc", "biking_man_dark_skin_tone": "1f6b4-1f3ff-200d-2640-fe0f", "disappointed_relieved": "1f625", "canary_islands": "1f1ee-1f1e8", "st_pierre_miquelon": "1f1f5-1f1f2", "trinidad_tobago": "1f1f9-1f1f9", "turkmenistan": "1f1f9-1f1f2", "pouting_woman_medium_skin_tone": "1f64e-1f3fd-200d-2640-fe0f", "man_student_dark_skin_tone": "1f468-1f3ff", "princess_dark_skin_tone": "1f478-1f3ff", "family_woman_boy_boy_medium_skin_tone": "1f469-1f3fd", "old_key": "1f5dd", "muscle_medium_skin_tone": "1f4aa-1f3fd", "ear_medium_dark_skin_tone": "1f442-1f3fe", "girl_medium_dark_skin_tone": "1f467-1f3fe", "man_pilot_dark_skin_tone": "1f468-1f3ff", "wolf": "1f43a", "gem": "1f48e", "arrow_double_up": "23eb", "woman_factory_worker_light_skin_tone": "1f469-1f3fb", "woman_mechanic_medium_skin_tone": "1f469-1f3fd", "woman_firefighter_light_skin_tone": "1f469-1f3fb", "sunglasses": "1f60e", "snake": "1f40d", "pen": "1f58a", "nose_medium_skin_tone": "1f443-1f3fd", "weight_lifting_man": "1f3cb-fe0f", "alarm_clock": "23f0", "golfing_woman_medium_light_skin_tone": "1f3cc-1f3fc-200d-2640-fe0f", "vulcan_salute": "1f596", "earth_asia": "1f30f", "+1_dark_skin_tone": "1f44d-1f3ff", "family_woman_boy_medium_dark_skin_tone": "1f469-1f3fe", "family_woman_girl_girl_light_skin_tone": "1f469-1f3fb", "weight_lifting_man_medium_light_skin_tone": "1f3cb-1f3fc-200d-2640-fe0f", "angel": "1f47c", "peach": "1f351", "truck": "1f69a", "tajikistan": "1f1f9-1f1ef", "tr": "1f1f9-1f1f7", "running_woman_medium_skin_tone": "1f3c3-1f3fd-200d-2640-fe0f", "wrench": "1f527", "black_flag": "1f3f4", "cape_verde": "1f1e8-1f1fb", "man_technologist_medium_light_skin_tone": "1f468-1f3fc", 
"mountain_biking_woman_medium_light_skin_tone": "1f6b5-1f3fc-200d-2640-fe0f", "man_astronaut_medium_skin_tone": "1f468-1f3fd", "man_in_tuxedo_medium_light_skin_tone": "1f935-1f3fc", "see_no_evil": "1f648", "egg": "1f95a", "1234": "1f522", "lesotho": "1f1f1-1f1f8", "middle_finger_medium_skin_tone": "1f595-1f3fd", "woman_health_worker_medium_light_skin_tone": "1f469-1f3fc", "sneezing_face": "1f927", "man_cook": "1f468-200d-1f373", "mortar_board": "1f393", "candle": "1f56f", "basketball_man_medium_skin_tone": "26f9-1f3fd-200d-2640-fe0f", "ferris_wheel": "1f3a1", "martinique": "1f1f2-1f1f6", "st_vincent_grenadines": "1f1fb-1f1e8", "yemen": "1f1fe-1f1ea", "pray_light_skin_tone": "1f64f-1f3fb", "man_in_tuxedo_medium_skin_tone": "1f935-1f3fd", "woman_pilot_medium_dark_skin_tone": "1f469-1f3fe", "pregnant_woman_dark_skin_tone": "1f930-1f3ff", "fu": "1f595", "haircut": "1f487", "boxing_glove": "1f94a", "page_with_curl": "1f4c3", "muscle_light_skin_tone": "1f4aa-1f3fb", "woman_firefighter_dark_skin_tone": "1f469-1f3ff", "ok_woman_light_skin_tone": "1f646-1f3fb-200d-2640-fe0f", "family_man_woman_boy_boy_medium_light_skin_tone": "1f468-1f3fc", "family_woman_boy_dark_skin_tone": "1f469-1f3ff", "weight_lifting_man_light_skin_tone": "1f3cb-1f3fb-200d-2640-fe0f", "blonde_man": "1f471", "woman_technologist": "1f469-200d-1f4bb", "boom": "1f4a5", "1st_place_medal": "1f947", "nine": "0039-fe0f-20e3", "czech_republic": "1f1e8-1f1ff", "meat_on_bone": "1f356", "hamburger": "1f354", "video_game": "1f3ae", "clock2": "1f551", "woman_facepalming_dark_skin_tone": "1f926-1f3ff-200d-2640-fe0f", "couplekiss_man_man_medium_dark_skin_tone": "1f468-1f3fe", "arrow_lower_right": "2198-fe0f", "haircut_woman_light_skin_tone": "1f487-1f3fb-200d-2640-fe0f", "woman_dark_skin_tone": "1f469-1f3ff", "older_man_light_skin_tone": "1f474-1f3fb", "first_quarter_moon_with_face": "1f31b", "fries": "1f35f", "restroom": "1f6bb", "zero": "0030-fe0f-20e3", "fr": "1f1eb-1f1f7", "kuwait": "1f1f0-1f1fc", "man_health_worker_medium_skin_tone": "1f468-1f3fd", "woman_judge_light_skin_tone": "1f469-1f3fb", "man_judge_dark_skin_tone": "1f468-1f3ff", "ok_woman_medium_skin_tone": "1f646-1f3fd-200d-2640-fe0f", "bike": "1f6b2", "registered": "00ae-fe0f", "blonde_woman_medium_skin_tone": "1f471-1f3fd-200d-2640-fe0f", "stuck_out_tongue_closed_eyes": "1f61d", "collision": "1f4a5", "wheelchair": "267f-fe0f", "black_circle": "26ab-fe0f", "point_up_2_light_skin_tone": "1f446-1f3fb", "older_man": "1f474", "suspension_railway": "1f69f", "libra": "264e-fe0f", "crossed_flags": "1f38c", "man_cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2642-fe0f", "man_playing_water_polo_dark_skin_tone": "1f93d-1f3ff-200d-2642-fe0f", "scream": "1f631", "no_good_man": "1f645-200d-2642-fe0f", "timer_clock": "23f2", "venezuela": "1f1fb-1f1ea", "raised_back_of_hand_light_skin_tone": "1f91a-1f3fb", "woman_technologist_medium_skin_tone": "1f469-1f3fd", "popcorn": "1f37f", "romania": "1f1f7-1f1f4", "togo": "1f1f9-1f1ec", "writing_hand_dark_skin_tone": "270d-1f3ff", "woman_singer_dark_skin_tone": "1f469-1f3ff", "pouting_woman_medium_dark_skin_tone": "1f64e-1f3fe-200d-2640-fe0f", "man_health_worker_light_skin_tone": "1f468-1f3fb", "dancer_medium_light_skin_tone": "1f483-1f3fc", "phone": "260e-fe0f", "chart": "1f4b9", "repeat": "1f501", "mahjong": "1f004-fe0f", "liberia": "1f1f1-1f1f7", "rage3": "rage3", "person_frowning": "1f64d", "open_hands_medium_skin_tone": "1f450-1f3fd", "man_dark_skin_tone": "1f468-1f3ff", "man_factory_worker_medium_light_skin_tone": "1f468-1f3fc", 
"man_astronaut_medium_light_skin_tone": "1f468-1f3fc", "ring": "1f48d", "ok_hand_medium_skin_tone": "1f44c-1f3fd", "santa": "1f385", "beach_umbrella": "1f3d6", "finland": "1f1eb-1f1ee", "woman_facepalming_medium_skin_tone": "1f926-1f3fd-200d-2640-fe0f", "woman_shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2640-fe0f", "sunflower": "1f33b", "ok_hand_dark_skin_tone": "1f44c-1f3ff", "santa_medium_skin_tone": "1f385-1f3fd", "call_me_hand_medium_skin_tone": "1f919-1f3fd", "man_firefighter_light_skin_tone": "1f468-1f3fb", "kiss": "1f48b", "mandarin": "1f34a", "dollar": "1f4b5", "clock3": "1f552", "argentina": "1f1e6-1f1f7", "fist_left_light_skin_tone": "1f91b-1f3fb", "santa_light_skin_tone": "1f385-1f3fb", "family_man_girl_boy_medium_light_skin_tone": "1f468-1f3fc", "sushi": "1f363", "rice": "1f35a", "mailbox_with_mail": "1f4ec", "woman_cook_medium_dark_skin_tone": "1f469-1f3fe", "family_man_woman_girl_girl_medium_dark_skin_tone": "1f468-1f3fe", "straight_ruler": "1f4cf", "blue_heart": "1f499", "slightly_frowning_face": "1f641", "crossed_fingers": "1f91e", "seedling": "1f331", "herb": "1f33f", "medal_military": "1f396", "camping": "1f3d5", "arrow_backward": "25c0-fe0f", "heavy_multiplication_x": "2716-fe0f", "icecream": "1f366", "heavy_dollar_sign": "1f4b2", "frowning_woman_dark_skin_tone": "1f64d-1f3ff-200d-2640-fe0f", "family_man_woman_boy_boy_medium_dark_skin_tone": "1f468-1f3fe", "brazil": "1f1e7-1f1f7", "fist_right_medium_light_skin_tone": "1f91c-1f3fc", "man_scientist_medium_dark_skin_tone": "1f468-1f3fe", "family_woman_girl_light_skin_tone": "1f469-1f3fb", "swimming_man_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2640-fe0f", "man": "1f468", "lemon": "1f34b", "japanese_castle": "1f3ef", "cinema": "1f3a6", "wave_light_skin_tone": "1f44b-1f3fb", "middle_finger_medium_light_skin_tone": "1f595-1f3fc", "five": "0035-fe0f-20e3", "boy_medium_dark_skin_tone": "1f466-1f3fe", "woman_technologist_dark_skin_tone": "1f469-1f3ff", "man_playing_handball_light_skin_tone": "1f93e-1f3fb-200d-2642-fe0f", "construction_worker_man": "1f477", "stadium": "1f3df", "biking_woman_dark_skin_tone": "1f6b4-1f3ff-200d-2640-fe0f", "trophy": "1f3c6", "arrow_left": "2b05-fe0f", "boy_dark_skin_tone": "1f466-1f3ff", "no_good_woman_light_skin_tone": "1f645-1f3fb-200d-2640-fe0f", "skull_and_crossbones": "2620-fe0f", "couple_with_heart_woman_woman": "1f469-200d-2764-fe0f-200d-1f469", "no_bicycles": "1f6b3", "bell": "1f514", "feelsgood": "feelsgood", "bowing_man_dark_skin_tone": "1f647-1f3ff-200d-2640-fe0f", "mouse": "1f42d", "anchor": "2693-fe0f", "cyclone": "1f300", "solomon_islands": "1f1f8-1f1e7", "basketball": "1f3c0", "notebook_with_decorative_cover": "1f4d4", "family_man_girl_girl_dark_skin_tone": "1f468-1f3ff", "singapore": "1f1f8-1f1ec", "golfing_man_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2640-fe0f", "woman": "1f469", "fried_shrimp": "1f364", "construction": "1f6a7", "eight_pointed_black_star": "2734-fe0f", "black_joker": "1f0cf", "cambodia": "1f1f0-1f1ed", "mount_fuji": "1f5fb", "link": "1f517", "womens": "1f6ba", "family_man_man_boy_boy_medium_skin_tone": "1f468-1f3fd", "joy": "1f602", "crying_cat_face": "1f63f", "parking": "1f17f-fe0f", "barbados": "1f1e7-1f1e7", "bowing_woman_dark_skin_tone": "1f647-1f3ff-200d-2640-fe0f", "angel_medium_light_skin_tone": "1f47c-1f3fc", "waning_gibbous_moon": "1f316", "synagogue": "1f54d", "american_samoa": "1f1e6-1f1f8", "basketball_woman_medium_light_skin_tone": "26f9-1f3fc-200d-2640-fe0f", "bug": "1f41b", "woman_farmer_light_skin_tone": "1f469-1f3fb", "door": "1f6aa", 
"place_of_worship": "1f6d0", "eight_spoked_asterisk": "2733-fe0f", "mrs_claus_dark_skin_tone": "1f936-1f3ff", "u7a7a": "1f233", "man_astronaut_medium_dark_skin_tone": "1f468-1f3fe", "jack_o_lantern": "1f383", "lock_with_ink_pen": "1f50f", "male_detective_medium_skin_tone": "1f575-1f3fd-200d-2640-fe0f", "woman_firefighter_medium_dark_skin_tone": "1f469-1f3fe", "smiling_imp": "1f608", "tv": "1f4fa", "pouting_man": "1f64e-200d-2642-fe0f", "e-mail": "1f4e7", "package": "1f4e6", "clock130": "1f55c", "family_man_boy_light_skin_tone": "1f468-1f3fb", "cat2": "1f408", "mountain_biking_woman_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2640-fe0f", "nauseated_face": "1f922", "fountain": "26f2-fe0f", "middle_finger": "1f595", "dancers": "1f46f", "cactus": "1f335", "man_student_light_skin_tone": "1f468-1f3fb", "family_man_man_girl_dark_skin_tone": "1f468-1f3ff", "family_man_girl_girl_medium_skin_tone": "1f468-1f3fd", "us_virgin_islands": "1f1fb-1f1ee", "woman_astronaut_medium_dark_skin_tone": "1f469-1f3fe", "honeybee": "1f41d", "bouquet": "1f490", "golfing_man": "1f3cc-fe0f", "u7981": "1f232", "french_guiana": "1f1ec-1f1eb", "kenya": "1f1f0-1f1ea", "melon": "1f348", "nicaragua": "1f1f3-1f1ee", "raised_hand_with_fingers_splayed_medium_light_skin_tone": "1f590-1f3fc", "bath_light_skin_tone": "1f6c0-1f3fb", "man_pilot_medium_light_skin_tone": "1f468-1f3fc", "european_post_office": "1f3e4", "mobile_phone_off": "1f4f4", "no_smoking": "1f6ad", "family_woman_girl_girl_medium_light_skin_tone": "1f469-1f3fc", "family_woman_girl_medium_dark_skin_tone": "1f469-1f3fe", "man_juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2642-fe0f", "expressionless": "1f611", "school_satchel": "1f392", "film_strip": "1f39e", "running_man_light_skin_tone": "1f3c3-1f3fb-200d-2640-fe0f", "family_man_woman_girl_medium_dark_skin_tone": "1f468-1f3fe", "family_woman_woman_boy_boy_light_skin_tone": "1f469-1f3fb", "smiley_cat": "1f63a", "chestnut": "1f330", "girl_dark_skin_tone": "1f467-1f3ff", "bride_with_veil_medium_dark_skin_tone": "1f470-1f3fe", "family_man_man_boy_boy_light_skin_tone": "1f468-1f3fb", "family_man_boy_boy_medium_light_skin_tone": "1f468-1f3fc", "wink": "1f609", "carrot": "1f955", "credit_card": "1f4b3", "triangular_ruler": "1f4d0", "question": "2753", "+1_medium_dark_skin_tone": "1f44d-1f3fe", "man_teacher_medium_dark_skin_tone": "1f468-1f3fe", "family_man_girl_girl_medium_light_skin_tone": "1f468-1f3fc", "kimono": "1f458", "bellhop_bell": "1f6ce", "red_circle": "1f534", "call_me_hand_light_skin_tone": "1f919-1f3fb", "nail_care_medium_skin_tone": "1f485-1f3fd", "woman_teacher_medium_skin_tone": "1f469-1f3fd", "woman_juggling_dark_skin_tone": "1f939-1f3ff-200d-2640-fe0f", "runner": "1f3c3", "heavy_check_mark": "2714-fe0f", "family_man_girl_girl_medium_dark_skin_tone": "1f468-1f3fe", "tent": "26fa-fe0f", "card_index_dividers": "1f5c2", "man_singer_dark_skin_tone": "1f468-1f3ff", "man_firefighter_medium_dark_skin_tone": "1f468-1f3fe", "man_playing_handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2642-fe0f", "female_detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2640-fe0f", "metal": "1f918", "dark_sunglasses": "1f576", "vertical_traffic_light": "1f6a6", "four": "0034-fe0f-20e3", "wavy_dash": "3030-fe0f", "ear_medium_light_skin_tone": "1f442-1f3fc", "man_juggling_dark_skin_tone": "1f939-1f3ff-200d-2642-fe0f", "kissing_heart": "1f618", "sweet_potato": "1f360", "gift_heart": "1f49d", "man_technologist_light_skin_tone": "1f468-1f3fb", "prince_medium_dark_skin_tone": "1f934-1f3fe", "ok_man_medium_skin_tone": 
"1f646-1f3fd-200d-2642-fe0f", "womans_clothes": "1f45a", "roller_coaster": "1f3a2", "woman_student_medium_light_skin_tone": "1f469-1f3fc", "zipper_mouth_face": "1f910", "person_with_blond_hair": "1f471", "leftwards_arrow_with_hook": "21a9-fe0f", "white_circle": "26aa-fe0f", "afghanistan": "1f1e6-1f1eb", "face_with_thermometer": "1f912", "bow": "1f647", "kr": "1f1f0-1f1f7", "finnadie": "finnadie", "girl_light_skin_tone": "1f467-1f3fb", "woman_farmer_medium_skin_tone": "1f469-1f3fd", "umbrella": "2614-fe0f", "ice_cream": "1f368", "point_down_medium_light_skin_tone": "1f447-1f3fc", "woman_health_worker_dark_skin_tone": "1f469-1f3ff", "rowing_woman_medium_light_skin_tone": "1f6a3-1f3fc-200d-2640-fe0f", "money_with_wings": "1f4b8", "dolls": "1f38e", "surfing_man_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2640-fe0f", "mrs_claus_medium_skin_tone": "1f936-1f3fd", "basketball_man_dark_skin_tone": "26f9-1f3ff-200d-2640-fe0f", "dragon_face": "1f432", "woman_cartwheeling": "1f938-200d-2640-fe0f", "aquarius": "2652-fe0f", "sos": "1f198", "clock1230": "1f567", "haiti": "1f1ed-1f1f9", "woman_facepalming": "1f926-200d-2640-fe0f", "clinking_glasses": "1f942", "trollface": "trollface", "mrs_claus_light_skin_tone": "1f936-1f3fb", "clap": "1f44f", "couplekiss_man_man": "1f468-200d-2764-fe0f-200d-1f48b-200d-1f468", "page_facing_up": "1f4c4", "belgium": "1f1e7-1f1ea", "curacao": "1f1e8-1f1fc", "family_woman_woman_boy_dark_skin_tone": "1f469-1f3ff", "two_men_holding_hands": "1f46c", "mountain_snow": "1f3d4", "wind_chime": "1f390", "person_with_pouting_face": "1f64e", "cityscape": "1f3d9", "bride_with_veil_dark_skin_tone": "1f470-1f3ff", "frowning_woman_light_skin_tone": "1f64d-1f3fb-200d-2640-fe0f", "bath_medium_light_skin_tone": "1f6c0-1f3fc", "sheep": "1f411", "sparkler": "1f387", "frowning_woman": "1f64d", "rat": "1f400", "custard": "1f36e", "video_camera": "1f4f9", "open_umbrella": "2602-fe0f", "man_with_turban_medium_light_skin_tone": "1f473-1f3fc-200d-2640-fe0f", "woman_student_medium_dark_skin_tone": "1f469-1f3fe", "ok_woman_medium_light_skin_tone": "1f646-1f3fc-200d-2640-fe0f", "swimming_man_dark_skin_tone": "1f3ca-1f3ff-200d-2640-fe0f", "man_cook_light_skin_tone": "1f468-1f3fb", "running_woman_light_skin_tone": "1f3c3-1f3fb-200d-2640-fe0f", "rabbit": "1f430", "ox": "1f402", "corn": "1f33d", "mozambique": "1f1f2-1f1ff", "point_right_light_skin_tone": "1f449-1f3fb", "nail_care_light_skin_tone": "1f485-1f3fb", "smiley": "1f603", "new_moon_with_face": "1f31a", "croatia": "1f1ed-1f1f7", "man_judge_medium_dark_skin_tone": "1f468-1f3fe", "fist_raised": "270a", "man_astronaut": "1f468-200d-1f680", "clock1130": "1f566", "st_lucia": "1f1f1-1f1e8", "princess": "1f478", "fist_left_medium_light_skin_tone": "1f91b-1f3fc", "point_left_medium_dark_skin_tone": "1f448-1f3fe", "woman_factory_worker_medium_skin_tone": "1f469-1f3fd", "angel_dark_skin_tone": "1f47c-1f3ff", "woman_cook": "1f469-200d-1f373", "koala": "1f428", "satellite": "1f4e1", "book": "1f4d6", "large_orange_diamond": "1f536", "monaco": "1f1f2-1f1e8", "spiral_notepad": "1f5d2", "capricorn": "2651-fe0f", "bacon": "1f953", "blonde_man_medium_dark_skin_tone": "1f471-1f3fe-200d-2640-fe0f", "business_suit_levitating_dark_skin_tone": "1f574-1f3ff", "call_me_hand_medium_light_skin_tone": "1f919-1f3fc", "female_detective_medium_light_skin_tone": "1f575-1f3fc-200d-2640-fe0f", "haircut_woman_medium_light_skin_tone": "1f487-1f3fc-200d-2640-fe0f", "alien": "1f47d", "baguette_bread": "1f956", "northern_mariana_islands": "1f1f2-1f1f5", "ukraine": "1f1fa-1f1e6", "flushed": 
"1f633", "man_scientist": "1f468-200d-1f52c", "trident": "1f531", "family_woman_woman_girl_boy_medium_skin_tone": "1f469-1f3fd", "family_man_boy_boy": "1f468-200d-1f466-200d-1f466", "tennis": "1f3be", "fire_engine": "1f692", "pushpin": "1f4cc", "man_health_worker_medium_dark_skin_tone": "1f468-1f3fe", "boy": "1f466", "headphones": "1f3a7", "fuelpump": "26fd-fe0f", "u6709": "1f236", "man_cook_medium_skin_tone": "1f468-1f3fd", "bride_with_veil_medium_skin_tone": "1f470-1f3fd", "point_up": "261d-fe0f", "necktie": "1f454", "control_knobs": "1f39b", "austria": "1f1e6-1f1f9", "papua_new_guinea": "1f1f5-1f1ec", "alembic": "2697-fe0f", "cook_islands": "1f1e8-1f1f0", "iceland": "1f1ee-1f1f8", "car": "1f697", "potable_water": "1f6b0", "haircut_man_medium_light_skin_tone": "1f487-1f3fc-200d-2642-fe0f", "couplekiss_woman_woman_medium_light_skin_tone": "1f469-1f3fc", "couplekiss_man_man_medium_skin_tone": "1f468-1f3fd", "cookie": "1f36a", "flight_departure": "1f6eb", "muscle_dark_skin_tone": "1f4aa-1f3ff", "construction_worker_man_medium_skin_tone": "1f477-1f3fd-200d-2640-fe0f", "black_medium_small_square": "25fe-fe0f", "guyana": "1f1ec-1f1fe", "file_folder": "1f4c1", "fountain_pen": "1f58b", "construction_worker_woman_medium_skin_tone": "1f477-1f3fd-200d-2640-fe0f", "family_man_woman_girl_boy_medium_light_skin_tone": "1f468-1f3fc", "poultry_leg": "1f357", "ski": "1f3bf", "guardswoman_medium_skin_tone": "1f482-1f3fd-200d-2640-fe0f", "family_man_man_girl_medium_dark_skin_tone": "1f468-1f3fe", "family_woman_boy_light_skin_tone": "1f469-1f3fb", "trumpet": "1f3ba", "no_pedestrians": "1f6b7", "heavy_minus_sign": "2796", "fist_oncoming": "1f44a", "ambulance": "1f691", "man_artist_dark_skin_tone": "1f468-1f3ff", "drum": "1f941", "train2": "1f686", "u7121": "1f21a-fe0f", "burkina_faso": "1f1e7-1f1eb", "nose_light_skin_tone": "1f443-1f3fb", "policewoman_medium_dark_skin_tone": "1f46e-1f3fe-200d-2640-fe0f", "lantern": "1f3ee", "metal_light_skin_tone": "1f918-1f3fb", "male_detective_light_skin_tone": "1f575-1f3fb-200d-2640-fe0f", "woman_juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2640-fe0f", "rowing_man_light_skin_tone": "1f6a3-1f3fb-200d-2640-fe0f", "lying_face": "1f925", "point_left": "1f448", "rosette": "1f3f5", "houses": "1f3d8", "repeat_one": "1f502", "liechtenstein": "1f1f1-1f1ee", "cloud_with_lightning": "1f329", "man_cartwheeling": "1f938-200d-2642-fe0f", "pause_button": "23f8", "arrows_clockwise": "1f503", "raised_hand_with_fingers_splayed_dark_skin_tone": "1f590-1f3ff", "clap_dark_skin_tone": "1f44f-1f3ff", "raising_hand_man_medium_skin_tone": "1f64b-1f3fd-200d-2642-fe0f", "family_woman_woman_girl_girl_medium_dark_skin_tone": "1f469-1f3fe", "dog": "1f436", "pouting_man_medium_dark_skin_tone": "1f64e-1f3fe-200d-2642-fe0f", "surfing_woman_medium_skin_tone": "1f3c4-1f3fd-200d-2640-fe0f", "confused": "1f615", "detective": "1f575-fe0f", "studio_microphone": "1f399", "fist_oncoming_medium_light_skin_tone": "1f44a-1f3fc", "man_firefighter_medium_skin_tone": "1f468-1f3fd", "tshirt": "1f455", "trolleybus": "1f68e", "norway": "1f1f3-1f1f4", "neckbeard": "neckbeard", "three": "0033-fe0f-20e3", "point_right_medium_skin_tone": "1f449-1f3fd", "man_medium_dark_skin_tone": "1f468-1f3fe", "pouting_man_medium_light_skin_tone": "1f64e-1f3fc-200d-2642-fe0f", "clock630": "1f561", "fist_raised_medium_skin_tone": "270a-1f3fd", "anguished": "1f627", "eye": "1f441", "bride_with_veil": "1f470", "hear_no_evil": "1f649", "wine_glass": "1f377", "soon": "1f51c", "family_man_boy_boy_light_skin_tone": "1f468-1f3fb", 
"family_woman_boy_boy_medium_light_skin_tone": "1f469-1f3fc", "dromedary_camel": "1f42a", "chipmunk": "1f43f", "soccer": "26bd-fe0f", "man_with_gua_pi_mao_medium_light_skin_tone": "1f472-1f3fc", "business_suit_levitating_medium_light_skin_tone": "1f574-1f3fc", "running_woman_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2640-fe0f", "speaking_head": "1f5e3", "menorah": "1f54e", "non-potable_water": "1f6b1", "woman_with_turban_dark_skin_tone": "1f473-1f3ff-200d-2640-fe0f", "woman_shrugging_medium_skin_tone": "1f937-1f3fd-200d-2640-fe0f", "frowning_woman_medium_light_skin_tone": "1f64d-1f3fc-200d-2640-fe0f", "biking_man_medium_light_skin_tone": "1f6b4-1f3fc-200d-2640-fe0f", "woman_shrugging": "1f937-200d-2640-fe0f", "arrow_upper_left": "2196-fe0f", "metal_medium_skin_tone": "1f918-1f3fd", "woman_factory_worker_dark_skin_tone": "1f469-1f3ff", "hotsprings": "2668-fe0f", "ear_dark_skin_tone": "1f442-1f3ff", "girl_medium_skin_tone": "1f467-1f3fd", "woman_farmer_dark_skin_tone": "1f469-1f3ff", "man_student_medium_skin_tone": "1f468-1f3fd", "biking_man_medium_skin_tone": "1f6b4-1f3fd-200d-2640-fe0f", "woman_scientist": "1f469-200d-1f52c", "vs": "1f19a", "weight_lifting_man_medium_skin_tone": "1f3cb-1f3fd-200d-2640-fe0f", "arrow_right": "27a1-fe0f", "woman_juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2640-fe0f", "racehorse": "1f40e", "sun_behind_large_cloud": "1f325", "convenience_store": "1f3ea", "namibia": "1f1f3-1f1e6", "raised_hands_medium_dark_skin_tone": "1f64c-1f3fe", "man_judge_medium_light_skin_tone": "1f468-1f3fc", "dancer_medium_skin_tone": "1f483-1f3fd", "fearful": "1f628", "frog": "1f438", "shopping_cart": "1f6d2", "family_man_boy_medium_light_skin_tone": "1f468-1f3fc", "basketball_man_medium_dark_skin_tone": "26f9-1f3fe-200d-2640-fe0f", "oman": "1f1f4-1f1f2", "paraguay": "1f1f5-1f1fe", "horse": "1f434", "tram": "1f68a", "wastebasket": "1f5d1", "yen": "1f4b4", "heavy_exclamation_mark": "2757-fe0f", "arrow_double_down": "23ec", "walking_woman_dark_skin_tone": "1f6b6-1f3ff-200d-2640-fe0f", "shoe": "1f45e", "ear_of_rice": "1f33e", "mountain": "26f0", "uzbekistan": "1f1fa-1f1ff", "baby_light_skin_tone": "1f476-1f3fb", "haircut_woman_medium_dark_skin_tone": "1f487-1f3fe-200d-2640-fe0f", "golfing_woman_medium_skin_tone": "1f3cc-1f3fd-200d-2640-fe0f", "earth_americas": "1f30e", "woman_playing_water_polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2640-fe0f", "walking_woman": "1f6b6-200d-2640-fe0f", "fried_egg": "1f373", "rocket": "1f680", "artificial_satellite": "1f6f0", "man_shrugging_medium_skin_tone": "1f937-1f3fd-200d-2642-fe0f", "golfing_man_dark_skin_tone": "1f3cc-1f3ff-200d-2640-fe0f", "older_woman_medium_skin_tone": "1f475-1f3fd", "man_with_gua_pi_mao_medium_dark_skin_tone": "1f472-1f3fe", "persevere": "1f623", "raising_hand_woman": "1f64b", "pig": "1f437", "european_castle": "1f3f0", "department_store": "1f3ec", "fist_right_light_skin_tone": "1f91c-1f3fb", "raising_hand_woman_dark_skin_tone": "1f64b-1f3ff-200d-2640-fe0f", "paw_prints": "1f43e", "moon": "1f314", "man_medium_skin_tone": "1f468-1f3fd", "rowing_man_dark_skin_tone": "1f6a3-1f3ff-200d-2640-fe0f", "sleepy": "1f62a", "light_rail": "1f688", "peace_symbol": "262e-fe0f", "m": "24c2-fe0f", "woman_pilot_medium_skin_tone": "1f469-1f3fd", "dango": "1f361", "minibus": "1f690", "family_man_man_girl_girl_medium_dark_skin_tone": "1f468-1f3fe", "dizzy_face": "1f635", "bowing_woman": "1f647-200d-2640-fe0f", "pig2": "1f416", "factory": "1f3ed", "small_red_triangle": "1f53a", "ok_man_light_skin_tone": "1f646-1f3fb-200d-2642-fe0f", 
"two_women_holding_hands": "1f46d", "funeral_urn": "26b1-fe0f", "cocos_islands": "1f1e8-1f1e8", "lipstick": "1f484", "fleur_de_lis": "269c-fe0f", "man_with_gua_pi_mao_dark_skin_tone": "1f472-1f3ff", "woman_factory_worker_medium_dark_skin_tone": "1f469-1f3fe", "no_good_man_medium_light_skin_tone": "1f645-1f3fc-200d-2642-fe0f", "horse_racing_medium_dark_skin_tone": "1f3c7-1f3fe", "clock1030": "1f565", "couplekiss_man_man_dark_skin_tone": "1f468-1f3ff", "frowning_man": "1f64d-200d-2642-fe0f", "family_woman_boy_boy_dark_skin_tone": "1f469-1f3ff", "family_man_girl_boy_light_skin_tone": "1f468-1f3fb", "smile": "1f604", "clock7": "1f556", "massage_man": "1f486-200d-2642-fe0f", "guardswoman_dark_skin_tone": "1f482-1f3ff-200d-2640-fe0f", "raising_hand_man_dark_skin_tone": "1f64b-1f3ff-200d-2642-fe0f", "woman_with_turban_medium_dark_skin_tone": "1f473-1f3fe-200d-2640-fe0f", "worried": "1f61f", "no_good": "1f645", "card_index": "1f4c7", "aland_islands": "1f1e6-1f1fd", "lion": "1f981", "hammer": "1f528", "bomb": "1f4a3", "reunion": "1f1f7-1f1ea", "walking_man_light_skin_tone": "1f6b6-1f3fb-200d-2640-fe0f", "family_woman_boy_medium_light_skin_tone": "1f469-1f3fc", "pouting_cat": "1f63e", "cow": "1f42e", "motor_scooter": "1f6f5", "hong_kong": "1f1ed-1f1f0", "family_man_girl_medium_dark_skin_tone": "1f468-1f3fe", "sailboat": "26f5-fe0f", "fiji": "1f1eb-1f1ef", "raised_hands_medium_light_skin_tone": "1f64c-1f3fc", "woman_office_worker_dark_skin_tone": "1f469-1f3ff", "family_man_woman_girl_girl_medium_light_skin_tone": "1f468-1f3fc", "arrow_up": "2b06-fe0f", "walking_woman_medium_light_skin_tone": "1f6b6-1f3fc-200d-2640-fe0f", "nose_medium_light_skin_tone": "1f443-1f3fc", "basketball_woman": "26f9-fe0f-200d-2640-fe0f", "+1_medium_light_skin_tone": "1f44d-1f3fc", "crossed_fingers_medium_skin_tone": "1f91e-1f3fd", "raised_back_of_hand_dark_skin_tone": "1f91a-1f3ff", "swimming_woman_medium_light_skin_tone": "1f3ca-1f3fc-200d-2640-fe0f", "construction_worker_woman": "1f477-200d-2640-fe0f", "rugby_football": "1f3c9", "micronesia": "1f1eb-1f1f2", "point_up_2_medium_light_skin_tone": "1f446-1f3fc", "running_man_dark_skin_tone": "1f3c3-1f3ff-200d-2640-fe0f", "woman_playing_handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2640-fe0f", "speaker": "1f508", "jersey": "1f1ef-1f1ea", "laughing": "1f606", "pregnant_woman": "1f930", "haircut_woman": "1f487", "blue_car": "1f699", "microscope": "1f52c", "postbox": "1f4ee", "man_firefighter_dark_skin_tone": "1f468-1f3ff", "sunny": "2600-fe0f", "beginner": "1f530", "clap_medium_light_skin_tone": "1f44f-1f3fc", "man_with_turban_dark_skin_tone": "1f473-1f3ff-200d-2640-fe0f", "rotating_light": "1f6a8", "saudi_arabia": "1f1f8-1f1e6", "family_woman_woman_girl_girl_medium_skin_tone": "1f469-1f3fd", "family_woman_girl_boy_light_skin_tone": "1f469-1f3fb", "man_with_gua_pi_mao": "1f472", "electric_plug": "1f50c", "panama": "1f1f5-1f1e6", "family_woman_woman_girl_light_skin_tone": "1f469-1f3fb", "thinking": "1f914", "point_down": "1f447", "spider": "1f577", "cloud_with_lightning_and_rain": "26c8", "ice_skate": "26f8", "ok_man_medium_dark_skin_tone": "1f646-1f3fe-200d-2642-fe0f", "netherlands": "1f1f3-1f1f1", "family_man_woman_boy": "1f46a", "orange": "1f34a", "snowboarder": "1f3c2", "passenger_ship": "1f6f3", "arrows_counterclockwise": "1f504", "tractor": "1f69c", "gambia": "1f1ec-1f1f2", "middle_finger_dark_skin_tone": "1f595-1f3ff", "tipping_hand_woman_medium_dark_skin_tone": "1f481-1f3fe-200d-2640-fe0f", "family_man_man_girl_boy_medium_light_skin_tone": "1f468-1f3fc", "thumbsup": 
"1f44d", "couple": "1f46b", "pouch": "1f45d", "asterisk": "002a-fe0f-20e3", "anguilla": "1f1e6-1f1ee", "woman_cook_light_skin_tone": "1f469-1f3fb", "kissing_cat": "1f63d", "nose": "1f443", "point_left_medium_skin_tone": "1f448-1f3fd", "baby_chick": "1f424", "deciduous_tree": "1f333", "u7533": "1f238", "surfing_woman_dark_skin_tone": "1f3c4-1f3ff-200d-2640-fe0f", "woman_shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2640-fe0f", "family_woman_woman_boy_boy_dark_skin_tone": "1f469-1f3ff", "cloud_with_rain": "1f327", "oden": "1f362", "botswana": "1f1e7-1f1fc", "greenland": "1f1ec-1f1f1", "man_office_worker_light_skin_tone": "1f468-1f3fb", "raising_hand_woman_medium_dark_skin_tone": "1f64b-1f3fe-200d-2640-fe0f", "family_man_man_girl_boy_medium_dark_skin_tone": "1f468-1f3fe", "school": "1f3eb", "woman_astronaut_light_skin_tone": "1f469-1f3fb", "woman_judge_medium_skin_tone": "1f469-1f3fd", "dancing_men": "1f46f-200d-2642-fe0f", "paperclips": "1f587", "underage": "1f51e", "ok_woman_dark_skin_tone": "1f646-1f3ff-200d-2640-fe0f", "man_playing_handball_dark_skin_tone": "1f93e-1f3ff-200d-2642-fe0f", "family_man_girl_girl": "1f468-200d-1f467-200d-1f467", "wind_face": "1f32c", "banana": "1f34c", "eight": "0038-fe0f-20e3", "man_technologist_medium_dark_skin_tone": "1f468-1f3fe", "man_office_worker_medium_skin_tone": "1f468-1f3fd", "walking_man_dark_skin_tone": "1f6b6-1f3ff-200d-2640-fe0f", "family_man_man_girl_girl_medium_skin_tone": "1f468-1f3fd", "snowman": "26c4-fe0f", "basketball_man": "26f9-fe0f", "information_source": "2139-fe0f", "cote_divoire": "1f1e8-1f1ee", "man_in_tuxedo_light_skin_tone": "1f935-1f3fb", "walking_woman_light_skin_tone": "1f6b6-1f3fb-200d-2640-fe0f", "woman_playing_water_polo_light_skin_tone": "1f93d-1f3fb-200d-2640-fe0f", "bird": "1f426", "o": "2b55-fe0f", "family_woman_girl_medium_skin_tone": "1f469-1f3fd", "rowing_woman_dark_skin_tone": "1f6a3-1f3ff-200d-2640-fe0f", "facepunch": "1f44a", "railway_car": "1f683", "wave_dark_skin_tone": "1f44b-1f3ff", "man_cook_medium_dark_skin_tone": "1f468-1f3fe", "prince_medium_light_skin_tone": "1f934-1f3fc", "cowboy_hat_face": "1f920", "handbag": "1f45c", "hourglass": "231b-fe0f", "albania": "1f1e6-1f1f1", "chile": "1f1e8-1f1f1", "woman_singer_medium_skin_tone": "1f469-1f3fd", "ear_medium_skin_tone": "1f442-1f3fd", "pouting_man_medium_skin_tone": "1f64e-1f3fd-200d-2642-fe0f", "surfing_man_medium_light_skin_tone": "1f3c4-1f3fc-200d-2640-fe0f", "eggplant": "1f346", "next_track_button": "23ed", "gabon": "1f1ec-1f1e6", "western_sahara": "1f1ea-1f1ed", "raised_hands_light_skin_tone": "1f64c-1f3fb", "older_woman_medium_light_skin_tone": "1f475-1f3fc", "joy_cat": "1f639", "feet": "1f43e", "partly_sunny": "26c5-fe0f", "pig_nose": "1f43d", "wc": "1f6be", "malaysia": "1f1f2-1f1fe", "girl_medium_light_skin_tone": "1f467-1f3fc", "man_office_worker_medium_dark_skin_tone": "1f468-1f3fe", "man_mechanic_medium_light_skin_tone": "1f468-1f3fc", "shamrock": "2618-fe0f", "tumbler_glass": "1f943", "palestinian_territories": "1f1f5-1f1f8", "kissing": "1f617", "city_sunset": "1f306", "pencil2": "270f-fe0f", "cool": "1f192", "australia": "1f1e6-1f1fa", "green_heart": "1f49a", "sparkle": "2747-fe0f", "ng_woman": "1f645", "high_heel": "1f460", "hamster": "1f439", "last_quarter_moon": "1f317", "stopwatch": "23f1", "date": "1f4c5", "nail_care_dark_skin_tone": "1f485-1f3ff", "santa_dark_skin_tone": "1f385-1f3ff", "astonished": "1f632", "mushroom": "1f344", "radio": "1f4fb", "hammer_and_wrench": "1f6e0", "arrow_down": "2b07-fe0f", "speech_balloon": "1f4ac", 
"couple_with_heart_man_man_medium_skin_tone": "1f468-1f3fd", "euro": "1f4b6", "es": "1f1ea-1f1f8", "woman_factory_worker_medium_light_skin_tone": "1f469-1f3fc", "pouting_woman_dark_skin_tone": "1f64e-1f3ff-200d-2640-fe0f", "massage_woman": "1f486", "spades": "2660-fe0f", "blonde_woman_dark_skin_tone": "1f471-1f3ff-200d-2640-fe0f", "man_farmer_medium_skin_tone": "1f468-1f3fd", "man_mechanic_medium_skin_tone": "1f468-1f3fd", "family_man_boy_dark_skin_tone": "1f468-1f3ff", "man_juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2642-fe0f", "hearts": "2665-fe0f", "clock930": "1f564", "central_african_republic": "1f1e8-1f1eb", "boy_medium_skin_tone": "1f466-1f3fd", "pregnant_woman_medium_skin_tone": "1f930-1f3fd", "woman_facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2640-fe0f", "palm_tree": "1f334", "rose": "1f339", "beers": "1f37b", "red_car": "1f697", "no_entry": "26d4-fe0f", "candy": "1f36c", "fist_oncoming_medium_skin_tone": "1f44a-1f3fd", "rowing_woman_medium_skin_tone": "1f6a3-1f3fd-200d-2640-fe0f", "sake": "1f376", "oncoming_police_car": "1f694", "woman_teacher_medium_dark_skin_tone": "1f469-1f3fe", "family_man_woman_girl_girl_medium_skin_tone": "1f468-1f3fd", "kissing_closed_eyes": "1f61a", "pager": "1f4df", "pencil": "1f4dd", "copyright": "00a9-fe0f", "wave_medium_skin_tone": "1f44b-1f3fd", "loud_sound": "1f50a", "luxembourg": "1f1f1-1f1fa", "policewoman_dark_skin_tone": "1f46e-1f3ff-200d-2640-fe0f", "woman_cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2640-fe0f", "swimming_woman_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2640-fe0f", "family_man_man_girl_boy": "1f468-200d-1f468-200d-1f467-200d-1f466", "police_car": "1f693", "mailbox_with_no_mail": "1f4ed", "middle_finger_light_skin_tone": "1f595-1f3fb", "pregnant_woman_medium_light_skin_tone": "1f930-1f3fc", "raising_hand_woman_medium_skin_tone": "1f64b-1f3fd-200d-2640-fe0f", "running": "1f3c3", "sun_with_face": "1f31e", "man_teacher_dark_skin_tone": "1f468-1f3ff", "family_man_woman_girl_girl_dark_skin_tone": "1f468-1f3ff", "izakaya_lantern": "1f3ee", "comoros": "1f1f0-1f1f2", "fist_oncoming_medium_dark_skin_tone": "1f44a-1f3fe", "man_singer": "1f468-200d-1f3a4", "mountain_bicyclist": "1f6b5", "point_down_light_skin_tone": "1f447-1f3fb", "family_man_woman_girl_boy_medium_dark_skin_tone": "1f468-1f3fe", "sob": "1f62d", "ophiuchus": "26ce", "greece": "1f1ec-1f1f7", "raised_back_of_hand_medium_skin_tone": "1f91a-1f3fd", "family_man_man_boy_light_skin_tone": "1f468-1f3fb", "woman_cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2640-fe0f", "massage_woman_light_skin_tone": "1f486-1f3fb-200d-2640-fe0f", "fishing_pole_and_fish": "1f3a3", "two_hearts": "1f495", "armenia": "1f1e6-1f1f2", "south_africa": "1f1ff-1f1e6", "boy_light_skin_tone": "1f466-1f3fb", "man_in_tuxedo_medium_dark_skin_tone": "1f935-1f3fe", "kiribati": "1f1f0-1f1ee", "v_dark_skin_tone": "270c-1f3ff", "frowning_man_medium_light_skin_tone": "1f64d-1f3fc-200d-2642-fe0f", "family_woman_woman_girl_boy": "1f469-200d-1f469-200d-1f467-200d-1f466", "family_woman_girl_boy_medium_dark_skin_tone": "1f469-1f3fe", "leopard": "1f406", "fireworks": "1f386", "clock6": "1f555", "bowing_man_medium_light_skin_tone": "1f647-1f3fc-200d-2640-fe0f", "raising_hand": "1f64b", "family_man_woman_girl_girl_light_skin_tone": "1f468-1f3fb", "vulcan_salute_medium_light_skin_tone": "1f596-1f3fc", "guardswoman_medium_light_skin_tone": "1f482-1f3fc-200d-2640-fe0f", "muscle": "1f4aa", "full_moon": "1f315", "pisces": "2653-fe0f", "kosovo": "1f1fd-1f1f0", "fist_left_dark_skin_tone": "1f91b-1f3ff", 
"point_up_2_dark_skin_tone": "1f446-1f3ff", "man_technologist_dark_skin_tone": "1f468-1f3ff", "spoon": "1f944", "nigeria": "1f1f3-1f1ec", "raised_back_of_hand_medium_light_skin_tone": "1f91a-1f3fc", "blonde_woman_light_skin_tone": "1f471-1f3fb-200d-2640-fe0f", "man_dancing_light_skin_tone": "1f57a-1f3fb", "shrimp": "1f990", "mountain_biking_man": "1f6b5", "boat": "26f5-fe0f", "egypt": "1f1ea-1f1ec", "family_woman_woman_boy_light_skin_tone": "1f469-1f3fb", "man_playing_water_polo_light_skin_tone": "1f93d-1f3fb-200d-2642-fe0f", "family_man_man_boy_boy": "1f468-200d-1f468-200d-1f466-200d-1f466", "foggy": "1f301", "construction_worker_woman_medium_light_skin_tone": "1f477-1f3fc-200d-2640-fe0f", "princess_medium_skin_tone": "1f478-1f3fd", "man_dancing_medium_dark_skin_tone": "1f57a-1f3fe", "couple_with_heart_man_man_dark_skin_tone": "1f468-1f3ff", "carousel_horse": "1f3a0", "crayon": "1f58d", "niue": "1f1f3-1f1fa", "woman_office_worker_medium_skin_tone": "1f469-1f3fd", "swimming_man_medium_skin_tone": "1f3ca-1f3fd-200d-2640-fe0f", "pensive": "1f614", "fire": "1f525", "monorail": "1f69d", "guam": "1f1ec-1f1fa", "older_woman_light_skin_tone": "1f475-1f3fb", "man_facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2642-fe0f", "family_man_man_girl": "1f468-200d-1f468-200d-1f467", "hammer_and_pick": "2692", "space_invader": "1f47e", "waning_crescent_moon": "1f318", "love_letter": "1f48c", "star_and_crescent": "262a-fe0f", "man_with_turban_light_skin_tone": "1f473-1f3fb-200d-2640-fe0f", "tipping_hand_woman_light_skin_tone": "1f481-1f3fb-200d-2640-fe0f", "dress": "1f457", "rainbow": "1f308", "cheese": "1f9c0", "bento": "1f371", "gear": "2699-fe0f", "-1_medium_skin_tone": "1f44e-1f3fd", "family_man_girl_boy_dark_skin_tone": "1f468-1f3ff", "fish_cake": "1f365", "desert_island": "1f3dd", "crystal_ball": "1f52e", "lock": "1f512", "no_good_man_medium_skin_tone": "1f645-1f3fd-200d-2642-fe0f", "small_blue_diamond": "1f539", "fist_raised_medium_dark_skin_tone": "270a-1f3fe", "man_health_worker_medium_light_skin_tone": "1f468-1f3fc", "ok_man_medium_light_skin_tone": "1f646-1f3fc-200d-2642-fe0f", "man_cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2642-fe0f", "policeman": "1f46e", "closed_lock_with_key": "1f510", "koko": "1f201", "guardswoman": "1f482-200d-2640-fe0f", "mailbox": "1f4eb", "weight_lifting_woman_light_skin_tone": "1f3cb-1f3fb-200d-2640-fe0f", "drooling_face": "1f924", "motorway": "1f6e3", "orthodox_cross": "2626-fe0f", "peru": "1f1f5-1f1ea", "woman_firefighter_medium_light_skin_tone": "1f469-1f3fc", "atom_symbol": "269b-fe0f", "benin": "1f1e7-1f1ef", "montenegro": "1f1f2-1f1ea", "tonga": "1f1f9-1f1f4", "family_man_boy_boy_medium_skin_tone": "1f468-1f3fd", "man_mechanic_light_skin_tone": "1f468-1f3fb", "female_detective": "1f575-fe0f-200d-2640-fe0f", "closed_umbrella": "1f302", "cow2": "1f404", "ballot_box": "1f5f3", "construction_worker_man_dark_skin_tone": "1f477-1f3ff-200d-2640-fe0f", "woman_technologist_medium_dark_skin_tone": "1f469-1f3fe", "indonesia": "1f1ee-1f1e9", "woman_pilot_medium_light_skin_tone": "1f469-1f3fc", "family_man_man_boy_boy_medium_light_skin_tone": "1f468-1f3fc", "call_me_hand": "1f919", "sun_behind_small_cloud": "1f324", "national_park": "1f3de", "radio_button": "1f518", "selfie_medium_light_skin_tone": "1f933-1f3fc", "woman_firefighter": "1f469-200d-1f692", "metal_dark_skin_tone": "1f918-1f3ff", "older_woman": "1f475", "man_factory_worker_medium_skin_tone": "1f468-1f3fd", "pick": "26cf", "woman_student_medium_skin_tone": "1f469-1f3fd", 
"mountain_biking_woman_light_skin_tone": "1f6b5-1f3fb-200d-2640-fe0f", "flags": "1f38f", "black_nib": "2712-fe0f", "rwanda": "1f1f7-1f1fc", "surfing_man_light_skin_tone": "1f3c4-1f3fb-200d-2640-fe0f", "first_quarter_moon": "1f313", "oil_drum": "1f6e2", "heart_decoration": "1f49f", "jp": "1f1ef-1f1f5", "woman_pilot": "1f469-200d-2708-fe0f", "city_sunrise": "1f307", "leo": "264c-fe0f", "arrow_up_down": "2195-fe0f", "selfie_medium_skin_tone": "1f933-1f3fd", "surfing_man_medium_skin_tone": "1f3c4-1f3fd-200d-2640-fe0f", "ramen": "1f35c", "up": "1f199", "woman_medium_light_skin_tone": "1f469-1f3fc", "woman_artist": "1f469-200d-1f3a8", "football": "1f3c8", "shopping": "1f6cd", "small_red_triangle_down": "1f53b", "crossed_fingers_light_skin_tone": "1f91e-1f3fb", "woman_artist_medium_dark_skin_tone": "1f469-1f3fe", "milk_glass": "1f95b", "clapper": "1f3ac", "star_of_david": "2721-fe0f", "dominican_republic": "1f1e9-1f1f4", "woman_teacher_light_skin_tone": "1f469-1f3fb", "man_juggling_medium_skin_tone": "1f939-1f3fd-200d-2642-fe0f", "-1": "1f44e", "wedding": "1f492", "faroe_islands": "1f1eb-1f1f4", "raising_hand_man_medium_dark_skin_tone": "1f64b-1f3fe-200d-2642-fe0f", "gemini": "264a-fe0f", "st_helena": "1f1f8-1f1ed", "running_woman_medium_light_skin_tone": "1f3c3-1f3fc-200d-2640-fe0f", "biking_woman_light_skin_tone": "1f6b4-1f3fb-200d-2640-fe0f", "paperclip": "1f4ce", "wave_medium_light_skin_tone": "1f44b-1f3fc", "man_factory_worker_medium_dark_skin_tone": "1f468-1f3fe", "woman_cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2640-fe0f", "clock12": "1f55b", "ru": "1f1f7-1f1fa", "clown_face": "1f921", "pizza": "1f355", "hole": "1f573", "incoming_envelope": "1f4e8", "yin_yang": "262f-fe0f", "warning": "26a0-fe0f", "family_man_man_girl_boy_dark_skin_tone": "1f468-1f3ff", "man_cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2642-fe0f", "ram": "1f40f", "cucumber": "1f952", "heartbeat": "1f493", "swaziland": "1f1f8-1f1ff", "nail_care_medium_dark_skin_tone": "1f485-1f3fe", "bath_medium_skin_tone": "1f6c0-1f3fd", "strawberry": "1f353", "peanuts": "1f95c", "field_hockey": "1f3d1", "cricket": "1f3cf", "woman_farmer_medium_dark_skin_tone": "1f469-1f3fe", "family_man_man_girl_girl_light_skin_tone": "1f468-1f3fb", "penguin": "1f427", "star": "2b50-fe0f", "woman_shrugging_light_skin_tone": "1f937-1f3fb-200d-2640-fe0f", "golfing_man_light_skin_tone": "1f3cc-1f3fb-200d-2640-fe0f", "innocent": "1f607", "mosque": "1f54c", "calendar": "1f4c6", "canada": "1f1e8-1f1e6", "rage4": "rage4", "woman_office_worker_medium_dark_skin_tone": "1f469-1f3fe", "poodle": "1f429", "grapes": "1f347", "love_hotel": "1f3e9", "vulcan_salute_medium_skin_tone": "1f596-1f3fd", "guardsman_medium_dark_skin_tone": "1f482-1f3fe-200d-2640-fe0f", "raising_hand_man_light_skin_tone": "1f64b-1f3fb-200d-2642-fe0f", "sleeping": "1f634", "nail_care": "1f485", "monkey": "1f412", "sao_tome_principe": "1f1f8-1f1f9", "dancer_medium_dark_skin_tone": "1f483-1f3fe", "classical_building": "1f3db", "swimming_woman_medium_skin_tone": "1f3ca-1f3fd-200d-2640-fe0f", "ok_hand": "1f44c", "rice_cracker": "1f358", "moyai": "1f5ff", "rage2": "rage2", "angel_light_skin_tone": "1f47c-1f3fb", "family_man_man_boy_boy_dark_skin_tone": "1f468-1f3ff", "smile_cat": "1f638", "angola": "1f1e6-1f1f4", "cameroon": "1f1e8-1f1f2", "man_student_medium_dark_skin_tone": "1f468-1f3fe", "weight_lifting_woman_medium_light_skin_tone": "1f3cb-1f3fc-200d-2640-fe0f", "waxing_crescent_moon": "1f312", "articulated_lorry": "1f69b", "pouting_woman_light_skin_tone": 
"1f64e-1f3fb-200d-2640-fe0f", "running_man_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2640-fe0f", "couple_with_heart_woman_woman_light_skin_tone": "1f469-1f3fb", "horse_racing_light_skin_tone": "1f3c7-1f3fb", "raised_back_of_hand": "1f91a", "saxophone": "1f3b7", "right_anger_bubble": "1f5ef", "tokelau": "1f1f9-1f1f0", "no_good_woman_medium_light_skin_tone": "1f645-1f3fc-200d-2640-fe0f", "walking_woman_medium_skin_tone": "1f6b6-1f3fd-200d-2640-fe0f", "family_woman_girl_girl": "1f469-200d-1f467-200d-1f467", "cake": "1f370", "abcd": "1f521", "tuvalu": "1f1f9-1f1fb", "suspect": "suspect", "mattermost": "mattermost", "swimming_woman_light_skin_tone": "1f3ca-1f3fb-200d-2640-fe0f", "white_medium_square": "25fb-fe0f", "haircut_woman_medium_skin_tone": "1f487-1f3fd-200d-2640-fe0f", "massage_woman_dark_skin_tone": "1f486-1f3ff-200d-2640-fe0f", "family_man_woman_girl_light_skin_tone": "1f468-1f3fb", "turks_caicos_islands": "1f1f9-1f1e8", "point_left_dark_skin_tone": "1f448-1f3ff", "family_man_man_boy_medium_dark_skin_tone": "1f468-1f3fe", "hand": "270b", "coffee": "2615-fe0f", "somalia": "1f1f8-1f1f4", "mountain_biking_man_dark_skin_tone": "1f6b5-1f3ff-200d-2640-fe0f", "hatching_chick": "1f423", "pear": "1f350", "baby_bottle": "1f37c", "ribbon": "1f380", "st_kitts_nevis": "1f1f0-1f1f3", "radioactive": "2622-fe0f", "end": "1f51a", "hand_medium_skin_tone": "270b-1f3fd", "family_woman_woman_girl_medium_light_skin_tone": "1f469-1f3fc", "3rd_place_medal": "1f949", "fist_left_medium_dark_skin_tone": "1f91b-1f3fe", "bolivia": "1f1e7-1f1f4", "point_up_light_skin_tone": "261d-1f3fb", "cherries": "1f352", "inbox_tray": "1f4e5", "pitcairn_islands": "1f1f5-1f1f3", "rage1": "rage1", "man_farmer_medium_dark_skin_tone": "1f468-1f3fe", "woman_with_turban": "1f473-200d-2640-fe0f", "unicorn": "1f984", "butterfly": "1f98b", "watch": "231a-fe0f", "arrow_up_small": "1f53c", "triangular_flag_on_post": "1f6a9", "heart_eyes": "1f60d", "shallow_pan_of_food": "1f958", "broken_heart": "1f494", "family_man_boy_boy_dark_skin_tone": "1f468-1f3ff", "golfing_woman_dark_skin_tone": "1f3cc-1f3ff-200d-2640-fe0f", "bath_dark_skin_tone": "1f6c0-1f3ff", "selfie": "1f933", "congratulations": "3297-fe0f", "baby_medium_light_skin_tone": "1f476-1f3fc", "woman_health_worker_medium_skin_tone": "1f469-1f3fd", "man_juggling": "1f939-200d-2642-fe0f", "arrow_down_small": "1f53d", "writing_hand_medium_light_skin_tone": "270d-1f3fc", "blonde_woman": "1f471-200d-2640-fe0f", "massage": "1f486", "metro": "1f687", "bath": "1f6c0", "female_detective_light_skin_tone": "1f575-1f3fb-200d-2640-fe0f", "haircut_man_light_skin_tone": "1f487-1f3fb-200d-2642-fe0f", "bowing_woman_medium_dark_skin_tone": "1f647-1f3fe-200d-2640-fe0f", "family_woman_woman_boy_medium_dark_skin_tone": "1f469-1f3fe", "shell": "1f41a", "seychelles": "1f1f8-1f1e8", "tipping_hand_man_medium_skin_tone": "1f481-1f3fd-200d-2642-fe0f", "panda_face": "1f43c", "sint_maarten": "1f1f8-1f1fd", "face_with_head_bandage": "1f915", "checkered_flag": "1f3c1", "samoa": "1f1fc-1f1f8", "v_medium_skin_tone": "270c-1f3fd", "couple_with_heart_man_man": "1f468-200d-2764-fe0f-200d-1f468", "shaved_ice": "1f367", "badminton": "1f3f8", "clock530": "1f560", "man_playing_water_polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2642-fe0f", "bulgaria": "1f1e7-1f1ec", "hurtrealbad": "hurtrealbad", "fist_oncoming_dark_skin_tone": "1f44a-1f3ff", "bat": "1f987", "signal_strength": "1f4f6", "iran": "1f1ee-1f1f7", "construction_worker_woman_medium_dark_skin_tone": "1f477-1f3fe-200d-2640-fe0f", "kiwi_fruit": "1f95d", 
"2nd_place_medal": "1f948", "kaaba": "1f54b", "knife": "1f52a", "ok_hand_light_skin_tone": "1f44c-1f3fb", "angel_medium_dark_skin_tone": "1f47c-1f3fe", "spider_web": "1f578", "oncoming_taxi": "1f696", "bookmark": "1f516", "u6307": "1f22f-fe0f", "za": "1f1ff-1f1e6", "fist_raised_light_skin_tone": "270a-1f3fb", "mag_right": "1f50e", "guinea": "1f1ec-1f1f3", "family_woman_woman_girl_dark_skin_tone": "1f469-1f3ff", "man_playing_handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2642-fe0f", "game_die": "1f3b2", "bullettrain_front": "1f685", "speedboat": "1f6a4", "hand_dark_skin_tone": "270b-1f3ff", "selfie_light_skin_tone": "1f933-1f3fb", "family_man_woman_boy_boy_dark_skin_tone": "1f468-1f3ff", "running_man": "1f3c3", "couplekiss_woman_woman": "1f469-200d-2764-fe0f-200d-1f48b-200d-1f469", "woman_teacher": "1f469-200d-1f3eb", "running_shirt_with_sash": "1f3bd", "bowing_man_medium_skin_tone": "1f647-1f3fd-200d-2640-fe0f", "point_right_medium_dark_skin_tone": "1f449-1f3fe", "man_cartwheeling_medium_light_skin_tone": "1f938-1f3fc-200d-2642-fe0f", "pouting_man_light_skin_tone": "1f64e-1f3fb-200d-2642-fe0f", "biking_man_light_skin_tone": "1f6b4-1f3fb-200d-2640-fe0f", "oncoming_automobile": "1f698", "steam_locomotive": "1f682", "newspaper": "1f4f0", "antigua_barbuda": "1f1e6-1f1ec", "macau": "1f1f2-1f1f4", "niger": "1f1f3-1f1ea", "chicken": "1f414", "flashlight": "1f526", "family_man_woman_boy_boy_medium_skin_tone": "1f468-1f3fd", "mens": "1f6b9", "it": "1f1ee-1f1f9", "new_caledonia": "1f1f3-1f1e8", "pray_medium_light_skin_tone": "1f64f-1f3fc", "nose_medium_dark_skin_tone": "1f443-1f3fe", "man_facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2642-fe0f", "poop": "1f4a9", "clap_light_skin_tone": "1f44f-1f3fb", "guardsman_medium_skin_tone": "1f482-1f3fd-200d-2640-fe0f", "woman_teacher_dark_skin_tone": "1f469-1f3ff", "grinning": "1f600", "aries": "2648-fe0f", "mrs_claus": "1f936", "green_book": "1f4d7", "middle_finger_medium_dark_skin_tone": "1f595-1f3fe", "rowing_woman_light_skin_tone": "1f6a3-1f3fb-200d-2640-fe0f", "tokyo_tower": "1f5fc", "printer": "1f5a8", "put_litter_in_its_place": "1f6ae", "suriname": "1f1f8-1f1f7", "woman_light_skin_tone": "1f469-1f3fb", "man_playing_water_polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2642-fe0f", "dove": "1f54a", "latin_cross": "271d-fe0f", "exclamation": "2757-fe0f", "man_health_worker_dark_skin_tone": "1f468-1f3ff", "bride_with_veil_light_skin_tone": "1f470-1f3fb", "rowboat": "1f6a3", "world_map": "1f5fa", "sleeping_bed": "1f6cc", "haircut_man_dark_skin_tone": "1f487-1f3ff-200d-2642-fe0f", "surfing_man_dark_skin_tone": "1f3c4-1f3ff-200d-2640-fe0f", "couple_with_heart_woman_man": "1f491", "chart_with_upwards_trend": "1f4c8", "fist_right_dark_skin_tone": "1f91c-1f3ff", "raised_hand_with_fingers_splayed_medium_skin_tone": "1f590-1f3fd", "older_woman_medium_dark_skin_tone": "1f475-1f3fe", "couplekiss_woman_woman_medium_dark_skin_tone": "1f469-1f3fe", "hatched_chick": "1f425", "running_man_medium_skin_tone": "1f3c3-1f3fd-200d-2640-fe0f", "chocolate_bar": "1f36b", "grenada": "1f1ec-1f1e9", "man_farmer_dark_skin_tone": "1f468-1f3ff", "milky_way": "1f30c", "slovakia": "1f1f8-1f1f0", "selfie_dark_skin_tone": "1f933-1f3ff", "prince_medium_skin_tone": "1f934-1f3fd", "family_man_boy": "1f468-200d-1f466", "chains": "26d3", "british_virgin_islands": "1f1fb-1f1ec", "bow_and_arrow": "1f3f9", "ferry": "26f4", "o2": "1f17e-fe0f", "st_barthelemy": "1f1e7-1f1f1", "policewoman_medium_skin_tone": "1f46e-1f3fd-200d-2640-fe0f", "imp": "1f47f", "bathtub": "1f6c1", "anger": 
"1f4a2", "previous_track_button": "23ee", "pouting_woman_medium_light_skin_tone": "1f64e-1f3fc-200d-2640-fe0f", "rabbit2": "1f407", "newspaper_roll": "1f5de", "one": "0031-fe0f-20e3", "family_man_man_girl_medium_skin_tone": "1f468-1f3fd", "pouting_woman": "1f64e", "moneybag": "1f4b0", "guardsman_dark_skin_tone": "1f482-1f3ff-200d-2640-fe0f", "man_shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2642-fe0f", "smirk": "1f60f", "woman_farmer": "1f469-200d-1f33e", "ocean": "1f30a", "sweat_drops": "1f4a6", "x": "274c", "woman_judge_medium_dark_skin_tone": "1f469-1f3fe", "tropical_drink": "1f379", "brunei": "1f1e7-1f1f3", "woman_artist_medium_light_skin_tone": "1f469-1f3fc", "pregnant_woman_medium_dark_skin_tone": "1f930-1f3fe", "basketball_woman_light_skin_tone": "26f9-1f3fb-200d-2640-fe0f", "evergreen_tree": "1f332", "fax": "1f4e0", "woman_medium_dark_skin_tone": "1f469-1f3fe", "walking_woman_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2640-fe0f", "lollipop": "1f36d", "bicyclist": "1f6b4", "bulb": "1f4a1", "computer": "1f4bb", "frowning_man_dark_skin_tone": "1f64d-1f3ff-200d-2642-fe0f", "guardsman_medium_light_skin_tone": "1f482-1f3fc-200d-2640-fe0f", "dancer_light_skin_tone": "1f483-1f3fb", "no_good_woman": "1f645", "cherry_blossom": "1f338", "woman_playing_water_polo": "1f93d-200d-2640-fe0f", "heavy_division_sign": "2797", "sri_lanka": "1f1f1-1f1f0", "-1_medium_light_skin_tone": "1f44e-1f3fc", "family_woman_girl_girl_dark_skin_tone": "1f469-1f3ff", "raised_hands": "1f64c", "sandal": "1f461", "rhinoceros": "1f98f", "swimming_man": "1f3ca", "scissors": "2702-fe0f", "horse_racing_dark_skin_tone": "1f3c7-1f3ff", "coffin": "26b0-fe0f", "clock1": "1f550", "eritrea": "1f1ea-1f1f7", "qatar": "1f1f6-1f1e6", "tanzania": "1f1f9-1f1ff", "pregnant_woman_light_skin_tone": "1f930-1f3fb", "cop": "1f46e", "tipping_hand_woman": "1f481", "estonia": "1f1ea-1f1ea", "man_singer_light_skin_tone": "1f468-1f3fb", "woman_judge_dark_skin_tone": "1f469-1f3ff", "business_suit_levitating_medium_skin_tone": "1f574-1f3fd", "blowfish": "1f421", "mountain_railway": "1f69e", "fast_forward": "23e9", "+1_medium_skin_tone": "1f44d-1f3fd", "goat": "1f410", "congo_kinshasa": "1f1e8-1f1e9", "point_down_dark_skin_tone": "1f447-1f3ff", "basketball_man_light_skin_tone": "26f9-1f3fb-200d-2640-fe0f", "woman_playing_handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2640-fe0f", "doughnut": "1f369", "musical_keyboard": "1f3b9", "couplekiss_woman_woman_medium_skin_tone": "1f469-1f3fd", "heavy_heart_exclamation": "2763-fe0f", "u6e80": "1f235", "woman_with_turban_medium_skin_tone": "1f473-1f3fd-200d-2640-fe0f", "horse_racing_medium_skin_tone": "1f3c7-1f3fd", "ear": "1f442", "canoe": "1f6f6", "andorra": "1f1e6-1f1e9", "ca": "1f1e8-1f1e6", "family_man_man_boy": "1f468-200d-1f468-200d-1f466", "ticket": "1f3ab", "station": "1f689", "large_blue_circle": "1f535", "palau": "1f1f5-1f1fc", "blush": "1f60a", "man_student": "1f468-200d-1f393", "woman_singer": "1f469-200d-1f3a4", "house_with_garden": "1f3e1", "smoking": "1f6ac", "b": "1f171-fe0f", "golfing_woman_light_skin_tone": "1f3cc-1f3fb-200d-2640-fe0f", "man_artist_light_skin_tone": "1f468-1f3fb", "unamused": "1f612", "japanese_ogre": "1f479", "film_projector": "1f4fd", "ballot_box_with_check": "2611-fe0f", "goberserk": "goberserk", "metal_medium_light_skin_tone": "1f918-1f3fc", "latvia": "1f1f1-1f1fb", "moldova": "1f1f2-1f1e9", "mask": "1f637", "bowing_man": "1f647", "man_shrugging": "1f937-200d-2642-fe0f", "ping_pong": "1f3d3", "trackball": "1f5b2", "six": "0036-fe0f-20e3", 
"point_up_medium_skin_tone": "261d-1f3fd", "hotel": "1f3e8", "bookmark_tabs": "1f4d1", "chart_with_downwards_trend": "1f4c9", "v_light_skin_tone": "270c-1f3fb", "tipping_hand_woman_medium_skin_tone": "1f481-1f3fd-200d-2640-fe0f", "heart_eyes_cat": "1f63b", "dancer": "1f483", "movie_camera": "1f3a5", "two": "0032-fe0f-20e3", "clap_medium_dark_skin_tone": "1f44f-1f3fe", "woman_astronaut_medium_skin_tone": "1f469-1f3fd", "frowning": "1f626", "cry": "1f622", "no_bell": "1f515", "hand_medium_dark_skin_tone": "270b-1f3fe", "ear_light_skin_tone": "1f442-1f3fb", "family_man_girl_boy": "1f468-200d-1f467-200d-1f466", "swimming_woman": "1f3ca-200d-2640-fe0f", "mountain_biking_woman": "1f6b5-200d-2640-fe0f", "mantelpiece_clock": "1f570", "bermuda": "1f1e7-1f1f2", "new_zealand": "1f1f3-1f1ff", "massage_man_medium_dark_skin_tone": "1f486-1f3fe-200d-2642-fe0f", "crown": "1f451", "biking_man": "1f6b4", "woman_playing_water_polo_medium_skin_tone": "1f93d-1f3fd-200d-2640-fe0f", "man_in_tuxedo_dark_skin_tone": "1f935-1f3ff", "no_entry_sign": "1f6ab", "hash": "0023-fe0f-20e3", "white_small_square": "25ab-fe0f", "iraq": "1f1ee-1f1f6", "switzerland": "1f1e8-1f1ed", "woman_mechanic_light_skin_tone": "1f469-1f3fb", "squirrel": "shipit", "woman_cook_medium_light_skin_tone": "1f469-1f3fc", "confounded": "1f616", "+1": "1f44d", "rowing_man": "1f6a3", "mailbox_closed": "1f4ea", "customs": "1f6c3", "mayotte": "1f1fe-1f1f9", "man_mechanic_medium_dark_skin_tone": "1f468-1f3fe", "man_artist_medium_skin_tone": "1f468-1f3fd", "man_playing_handball_medium_skin_tone": "1f93e-1f3fd-200d-2642-fe0f", "grimacing": "1f62c", "dart": "1f3af", "wave_medium_dark_skin_tone": "1f44b-1f3fe", "slightly_smiling_face": "1f642", "medal_sports": "1f3c5", "bank": "1f3e6", "man_student_medium_light_skin_tone": "1f468-1f3fc", "man_pilot_light_skin_tone": "1f468-1f3fb", "weight_lifting_woman_medium_skin_tone": "1f3cb-1f3fd-200d-2640-fe0f", "dash": "1f4a8", "volcano": "1f30b", "antarctica": "1f1e6-1f1f6", "woman_facepalming_light_skin_tone": "1f926-1f3fb-200d-2640-fe0f", "man_dancing_medium_light_skin_tone": "1f57a-1f3fc", "scream_cat": "1f640", "fog": "1f32b", "fist_oncoming_light_skin_tone": "1f44a-1f3fb", "man_dancing_medium_skin_tone": "1f57a-1f3fd", "burrito": "1f32f", "thought_balloon": "1f4ad", "massage_man_medium_light_skin_tone": "1f486-1f3fc-200d-2642-fe0f", "couple_with_heart_woman_woman_dark_skin_tone": "1f469-1f3ff", "writing_hand": "270d-fe0f", "zap": "26a1-fe0f", "recycle": "267b-fe0f", "policewoman_medium_light_skin_tone": "1f46e-1f3fc-200d-2640-fe0f", "frowning_woman_medium_skin_tone": "1f64d-1f3fd-200d-2640-fe0f", "massage_man_light_skin_tone": "1f486-1f3fb-200d-2642-fe0f", "woman_student": "1f469-200d-1f393", "surfing_woman": "1f3c4-200d-2640-fe0f", "sunrise": "1f305", "open_file_folder": "1f4c2", "diamonds": "2666-fe0f", "family_man_woman_girl_girl": "1f468-200d-1f469-200d-1f467-200d-1f467", "airplane": "2708-fe0f", "arrow_heading_down": "2935-fe0f", "uruguay": "1f1fa-1f1fe", "point_down_medium_dark_skin_tone": "1f447-1f3fe", "family_man_man_boy_dark_skin_tone": "1f468-1f3ff", "family_man_woman_girl_boy": "1f468-200d-1f469-200d-1f467-200d-1f466", "confetti_ball": "1f38a", "flower_playing_cards": "1f3b4", "algeria": "1f1e9-1f1ff", "man_teacher_medium_light_skin_tone": "1f468-1f3fc", "woman_artist_light_skin_tone": "1f469-1f3fb", "family_man_woman_girl_medium_skin_tone": "1f468-1f3fd", "nerd_face": "1f913", "eyes": "1f440", "boot": "1f462", "unlock": "1f513", "zzz": "1f4a4", "vatican_city": "1f1fb-1f1e6", "hot_pepper": "1f336", 
"slot_machine": "1f3b0", "sunrise_over_mountains": "1f304", "haircut_man_medium_skin_tone": "1f487-1f3fd-200d-2642-fe0f", "stuck_out_tongue": "1f61b", "point_up_medium_dark_skin_tone": "261d-1f3fe", "vulcan_salute_medium_dark_skin_tone": "1f596-1f3fe", "family": "1f46a", "key": "1f511", "myanmar": "1f1f2-1f1f2", "policeman_medium_light_skin_tone": "1f46e-1f3fc-200d-2640-fe0f", "man_shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2642-fe0f", "woman_health_worker": "1f469-200d-2695-fe0f", "woman_judge": "1f469-200d-2696-fe0f", "japan": "1f5fe", "dominica": "1f1e9-1f1f2", "dragon": "1f409", "open_book": "1f4d6", "raising_hand_man": "1f64b-200d-2642-fe0f", "bikini": "1f459", "loudspeaker": "1f4e2", "woman_astronaut_medium_light_skin_tone": "1f469-1f3fc", "envelope_with_arrow": "1f4e9", "thailand": "1f1f9-1f1ed", "point_up_medium_light_skin_tone": "261d-1f3fc", "baby_medium_dark_skin_tone": "1f476-1f3fe", "man_scientist_medium_skin_tone": "1f468-1f3fd", "bowing_woman_medium_light_skin_tone": "1f647-1f3fc-200d-2640-fe0f", "construction_worker": "1f477", "nut_and_bolt": "1f529", "sparkling_heart": "1f496", "couplekiss_woman_woman_dark_skin_tone": "1f469-1f3ff", "elephant": "1f418", "bar_chart": "1f4ca", "nose_dark_skin_tone": "1f443-1f3ff", "stop_button": "23f9", "family_man_woman_boy_boy_light_skin_tone": "1f468-1f3fb", "family_man_girl_medium_light_skin_tone": "1f468-1f3fc", "relieved": "1f60c", "man_in_tuxedo": "1f935", "kick_scooter": "1f6f4", "statue_of_liberty": "1f5fd", "information_desk_person": "1f481", "sa": "1f202-fe0f", "abc": "1f524", "robot": "1f916", "cat": "1f431", "accept": "1f251", "upside_down_face": "1f643", "cloud": "2601-fe0f", "frowning_man_light_skin_tone": "1f64d-1f3fb-200d-2642-fe0f", "walking_man_medium_skin_tone": "1f6b6-1f3fd-200d-2640-fe0f", "sparkles": "2728", "u5272": "1f239", "globe_with_meridians": "1f310", "frowning_woman_medium_dark_skin_tone": "1f64d-1f3fe-200d-2640-fe0f", "grey_exclamation": "2755", "tm": "2122-fe0f", "massage_man_dark_skin_tone": "1f486-1f3ff-200d-2642-fe0f", "family_woman_woman_girl_boy_dark_skin_tone": "1f469-1f3ff", "paintbrush": "1f58c", "arrow_right_hook": "21aa-fe0f", "mauritania": "1f1f2-1f1f7", "man_scientist_light_skin_tone": "1f468-1f3fb", "woman_juggling_light_skin_tone": "1f939-1f3fb-200d-2640-fe0f", "ok_woman": "1f646", "snail": "1f40c", "hocho": "1f52a", "arrow_forward": "25b6-fe0f", "french_southern_territories": "1f1f9-1f1eb", "iphone": "1f4f1", "princess_medium_light_skin_tone": "1f478-1f3fc", "maple_leaf": "1f341", "open_hands": "1f450", "racing_car": "1f3ce", "pill": "1f48a", "cuba": "1f1e8-1f1fa", "fist_raised_dark_skin_tone": "270a-1f3ff", "blonde_man_dark_skin_tone": "1f471-1f3ff-200d-2640-fe0f", "family_woman_girl_boy_dark_skin_tone": "1f469-1f3ff", "fox_face": "1f98a", "man_playing_handball": "1f93e-200d-2642-fe0f", "bullettrain_side": "1f684", "black_small_square": "25aa-fe0f", "kazakhstan": "1f1f0-1f1ff", "vanuatu": "1f1fb-1f1fa", "older_man_medium_skin_tone": "1f474-1f3fd", "man_teacher": "1f468-200d-1f3eb", "family_man_man_boy_boy_medium_dark_skin_tone": "1f468-1f3fe", "back": "1f519", "point_up_2_medium_dark_skin_tone": "1f446-1f3fe", "woman_teacher_medium_light_skin_tone": "1f469-1f3fc", "family_woman_boy_boy_medium_dark_skin_tone": "1f469-1f3fe", "surfing_woman_medium_light_skin_tone": "1f3c4-1f3fc-200d-2640-fe0f", "portugal": "1f1f5-1f1f9", "construction_worker_woman_dark_skin_tone": "1f477-1f3ff-200d-2640-fe0f", "family_man_man_boy_medium_skin_tone": "1f468-1f3fd", "family_man_girl_dark_skin_tone": 
"1f468-1f3ff", "woman_mechanic": "1f469-200d-1f527", "arrow_heading_up": "2934-fe0f", "clock330": "1f55e", "malawi": "1f1f2-1f1fc", "ok_hand_medium_dark_skin_tone": "1f44c-1f3fe", "prince_dark_skin_tone": "1f934-1f3ff", "ice_hockey": "1f3d2", "pk": "1f1f5-1f1f0", "san_marino": "1f1f8-1f1f2", "point_left_light_skin_tone": "1f448-1f3fb", "woman_office_worker_medium_light_skin_tone": "1f469-1f3fc", "swimming_man_medium_light_skin_tone": "1f3ca-1f3fc-200d-2640-fe0f", "stuffed_flatbread": "1f959", "aerial_tramway": "1f6a1", "family_man_man_girl_girl_dark_skin_tone": "1f468-1f3ff", "family_woman_girl_girl_medium_dark_skin_tone": "1f469-1f3fe", "closed_book": "1f4d5", "family_woman_girl_boy_medium_light_skin_tone": "1f469-1f3fc", "family_man_man_girl_boy_medium_skin_tone": "1f468-1f3fd", "v": "270c-fe0f", "play_or_pause_button": "23ef", "el_salvador": "1f1f8-1f1fb", "woman_judge_medium_light_skin_tone": "1f469-1f3fc", "santa_medium_light_skin_tone": "1f385-1f3fc", "couplekiss_man_man_light_skin_tone": "1f468-1f3fb", "blonde_man_light_skin_tone": "1f471-1f3fb-200d-2640-fe0f", "fist_right": "1f91c", "man_with_turban": "1f473", "cancer": "264b-fe0f", "tunisia": "1f1f9-1f1f3", "open_hands_medium_light_skin_tone": "1f450-1f3fc", "call_me_hand_medium_dark_skin_tone": "1f919-1f3fe", "tired_face": "1f62b", "tongue": "1f445", "shower": "1f6bf", "british_indian_ocean_territory": "1f1ee-1f1f4", "man_firefighter_medium_light_skin_tone": "1f468-1f3fc", "couple_with_heart_woman_woman_medium_dark_skin_tone": "1f469-1f3fe", "crescent_moon": "1f319", "ecuador": "1f1ea-1f1e8", "french_polynesia": "1f1f5-1f1eb", "man_light_skin_tone": "1f468-1f3fb", "mountain_biking_woman_medium_skin_tone": "1f6b5-1f3fd-200d-2640-fe0f", "pakistan": "1f1f5-1f1f0", "open_hands_medium_dark_skin_tone": "1f450-1f3fe", "telephone": "260e-fe0f", "envelope": "2709-fe0f", "revolving_hearts": "1f49e", "mega": "1f4e3", "montserrat": "1f1f2-1f1f8", "uganda": "1f1fa-1f1ec", "tropical_fish": "1f420", "hibiscus": "1f33a", "rainbow_flag": "1f3f3-fe0f-200d-1f308", "bangladesh": "1f1e7-1f1e9", "shipit": "shipit", "no_good_man_dark_skin_tone": "1f645-1f3ff-200d-2642-fe0f", "no_mouth": "1f636", "man_farmer": "1f468-200d-1f33e", "speak_no_evil": "1f64a", "level_slider": "1f39a", "guatemala": "1f1ec-1f1f9", "woman_factory_worker": "1f469-200d-1f3ed", "fork_and_knife": "1f374", "belarus": "1f1e7-1f1fe", "family_woman_woman_girl_boy_medium_dark_skin_tone": "1f469-1f3fe", "yum": "1f60b", "helicopter": "1f681", "busstop": "1f68f", "policewoman_light_skin_tone": "1f46e-1f3fb-200d-2640-fe0f", "man_technologist_medium_skin_tone": "1f468-1f3fd", "man_with_gua_pi_mao_light_skin_tone": "1f472-1f3fb", "man_astronaut_dark_skin_tone": "1f468-1f3ff", "skull": "1f480", "smirk_cat": "1f63c", "jeans": "1f456", "flipper": "1f42c", "dizzy": "1f4ab", "cocktail": "1f378", "basketball_woman_medium_skin_tone": "26f9-1f3fd-200d-2640-fe0f", "v_medium_light_skin_tone": "270c-1f3fc", "secret": "3299-fe0f", "seven": "0037-fe0f-20e3", "ghana": "1f1ec-1f1ed", "guernsey": "1f1ec-1f1ec", "kyrgyzstan": "1f1f0-1f1ec", "godmode": "godmode", "female_detective_dark_skin_tone": "1f575-1f3ff-200d-2640-fe0f", "fallen_leaf": "1f342", "snowflake": "2744-fe0f", "raised_hand_with_fingers_splayed_medium_dark_skin_tone": "1f590-1f3fe", "woman_health_worker_medium_dark_skin_tone": "1f469-1f3fe", "man_shrugging_dark_skin_tone": "1f937-1f3ff-200d-2642-fe0f", "pout": "1f621", "stars": "1f320", "family_woman_girl_boy": "1f469-200d-1f467-200d-1f466", "gun": "1f52b", "woman_scientist_dark_skin_tone": 
"1f469-1f3ff", "basketball_woman_dark_skin_tone": "26f9-1f3ff-200d-2640-fe0f", "biking_woman_medium_light_skin_tone": "1f6b4-1f3fc-200d-2640-fe0f", "family_man_girl_boy_medium_dark_skin_tone": "1f468-1f3fe", "oncoming_bus": "1f68d", "seat": "1f4ba", "vhs": "1f4fc", "lithuania": "1f1f1-1f1f9", "v_medium_dark_skin_tone": "270c-1f3fe", "man_with_gua_pi_mao_medium_skin_tone": "1f472-1f3fd", "frowning_face": "2639-fe0f", "shit": "1f4a9", "ab": "1f18e", "couple_with_heart_woman_woman_medium_skin_tone": "1f469-1f3fd", "family_woman_woman_girl_girl": "1f469-200d-1f469-200d-1f467-200d-1f467", "potato": "1f954", "minidisc": "1f4bd", "libya": "1f1f1-1f1fe", "point_right_dark_skin_tone": "1f449-1f3ff", "man_artist": "1f468-200d-1f3a8", "pineapple": "1f34d", "spaghetti": "1f35d", "couch_and_lamp": "1f6cb", "free": "1f193", "jamaica": "1f1ef-1f1f2", "woman_astronaut_dark_skin_tone": "1f469-1f3ff", "man_mechanic": "1f468-200d-1f527", "curry": "1f35b", "small_orange_diamond": "1f538", "pray": "1f64f", "hotdog": "1f32d", "currency_exchange": "1f4b1", "-1_dark_skin_tone": "1f44e-1f3ff", "man_office_worker_dark_skin_tone": "1f468-1f3ff", "clock830": "1f563", "policeman_medium_skin_tone": "1f46e-1f3fd-200d-2640-fe0f", "grin": "1f601", "water_buffalo": "1f403", "older_man_dark_skin_tone": "1f474-1f3ff", "business_suit_levitating_medium_dark_skin_tone": "1f574-1f3fe", "couple_with_heart_man_man_medium_light_skin_tone": "1f468-1f3fc", "rowing_man_medium_light_skin_tone": "1f6a3-1f3fc-200d-2640-fe0f", "purse": "1f45b", "slovenia": "1f1f8-1f1ee", "tipping_hand_man_medium_light_skin_tone": "1f481-1f3fc-200d-2642-fe0f", "madagascar": "1f1f2-1f1ec", "south_georgia_south_sandwich_islands": "1f1ec-1f1f8", "punch": "1f44a", "man_pilot": "1f468-200d-2708-fe0f", "owl": "1f989", "croissant": "1f950", "email": "2709-fe0f", "outbox_tray": "1f4e4", "construction_worker_man_medium_light_skin_tone": "1f477-1f3fc-200d-2640-fe0f", "mrs_claus_medium_dark_skin_tone": "1f936-1f3fe", "family_man_woman_girl_boy_dark_skin_tone": "1f468-1f3ff", "file_cabinet": "1f5c4", "hungary": "1f1ed-1f1fa", "pray_medium_dark_skin_tone": "1f64f-1f3fe", "woman_mechanic_dark_skin_tone": "1f469-1f3ff", "angel_medium_skin_tone": "1f47c-1f3fd", "man_dancing": "1f57a", "pound": "1f4b7", "macedonia": "1f1f2-1f1f0", "man_facepalming_medium_skin_tone": "1f926-1f3fd-200d-2642-fe0f", "scroll": "1f4dc", "rescue_worker_helmet": "26d1", "desktop_computer": "1f5a5", "heavy_plus_sign": "2795", "man_with_turban_medium_skin_tone": "1f473-1f3fd-200d-2640-fe0f", "horse_racing": "1f3c7", "low_brightness": "1f505", "loop": "27bf", "man_with_turban_medium_dark_skin_tone": "1f473-1f3fe-200d-2640-fe0f", "champagne": "1f37e", "construction_worker_woman_light_skin_tone": "1f477-1f3fb-200d-2640-fe0f", "man_teacher_light_skin_tone": "1f468-1f3fb", "family_woman_woman_girl_girl_medium_light_skin_tone": "1f469-1f3fc", "footprints": "1f463", "cloud_with_snow": "1f328", "man_cook_medium_light_skin_tone": "1f468-1f3fc", "woman_mechanic_medium_light_skin_tone": "1f469-1f3fc", "point_up_2": "1f446", "circus_tent": "1f3aa", "serbia": "1f1f7-1f1f8", "fist_right_medium_dark_skin_tone": "1f91c-1f3fe", "weight_lifting_woman_dark_skin_tone": "1f3cb-1f3ff-200d-2640-fe0f", "musical_score": "1f3bc", "violin": "1f3bb", "card_file_box": "1f5c3", "tipping_hand_woman_dark_skin_tone": "1f481-1f3ff-200d-2640-fe0f", "man_facepalming_dark_skin_tone": "1f926-1f3ff-200d-2642-fe0f", "open_mouth": "1f62e", "left_right_arrow": "2194-fe0f", "no_good_man_light_skin_tone": "1f645-1f3fb-200d-2642-fe0f", 
"man_factory_worker": "1f468-200d-1f3ed", "man_judge": "1f468-200d-2696-fe0f", "negative_squared_cross_mark": "274e", "bowing_woman_medium_skin_tone": "1f647-1f3fd-200d-2640-fe0f", "family_woman_boy_boy_light_skin_tone": "1f469-1f3fb", "battery": "1f50b", "couplekiss_man_man_medium_light_skin_tone": "1f468-1f3fc", "clock5": "1f554", "white_flag": "1f3f3-fe0f", "guadeloupe": "1f1ec-1f1f5", "muscle_medium_dark_skin_tone": "1f4aa-1f3fe", "man_scientist_dark_skin_tone": "1f468-1f3ff", "business_suit_levitating_light_skin_tone": "1f574-1f3fb", "woman_office_worker": "1f469-200d-1f4bc", "gift": "1f381", "sound": "1f509", "clubs": "2663-fe0f", "woman_scientist_medium_light_skin_tone": "1f469-1f3fc", "female_detective_medium_skin_tone": "1f575-1f3fd-200d-2640-fe0f", "man_singer_medium_light_skin_tone": "1f468-1f3fc", "family_man_girl": "1f468-200d-1f467", "bee": "1f41d", "full_moon_with_face": "1f31d", "black_medium_square": "25fc-fe0f", "zambia": "1f1ff-1f1f2", "raised_hands_dark_skin_tone": "1f64c-1f3ff", "family_woman_woman_boy_boy_medium_skin_tone": "1f469-1f3fd", "bread": "1f35e", "clock11": "1f55a", "man_office_worker_medium_light_skin_tone": "1f468-1f3fc", "woman_firefighter_medium_skin_tone": "1f469-1f3fd", "man_dancing_dark_skin_tone": "1f57a-1f3ff", "family_man_boy_medium_dark_skin_tone": "1f468-1f3fe", "hugs": "1f917", "roll_eyes": "1f644", "raised_hand": "270b", "tangerine": "1f34a", "grey_question": "2754", "princess_light_skin_tone": "1f478-1f3fb", "motor_boat": "1f6e5", "passport_control": "1f6c2", "man_artist_medium_dark_skin_tone": "1f468-1f3fe", "golfing_man_medium_skin_tone": "1f3cc-1f3fd-200d-2640-fe0f", "shirt": "1f455", "whale": "1f433", "apple": "1f34e", "ethiopia": "1f1ea-1f1f9", "jordan": "1f1ef-1f1f4", "biking_woman_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2640-fe0f", "family_woman_girl_girl_medium_skin_tone": "1f469-1f3fd", "turkey": "1f983", "snowman_with_snow": "2603-fe0f", "fist_left_medium_skin_tone": "1f91b-1f3fd", "woman_with_turban_light_skin_tone": "1f473-1f3fb-200d-2640-fe0f", "woman_pilot_dark_skin_tone": "1f469-1f3ff", "family_woman_woman_boy_medium_skin_tone": "1f469-1f3fd", "purple_heart": "1f49c", "black_heart": "1f5a4", "haircut_man": "1f487-200d-2642-fe0f", "arrow_lower_left": "2199-fe0f", "guinea_bissau": "1f1ec-1f1fc", "sudan": "1f1f8-1f1e9", "woman_scientist_light_skin_tone": "1f469-1f3fb", "bust_in_silhouette": "1f464", "walking": "1f6b6", "european_union": "1f1ea-1f1fa", "running_woman_dark_skin_tone": "1f3c3-1f3ff-200d-2640-fe0f", "om": "1f549", "rowing_man_medium_skin_tone": "1f6a3-1f3fd-200d-2640-fe0f", "ideograph_advantage": "1f250", "nepal": "1f1f3-1f1f5", "syria": "1f1f8-1f1fe", "man_pilot_medium_dark_skin_tone": "1f468-1f3fe", "princess_medium_dark_skin_tone": "1f478-1f3fe", "watermelon": "1f349", "left_luggage": "1f6c5", "us": "1f1fa-1f1f8", "point_left_medium_light_skin_tone": "1f448-1f3fc", "family_man_girl_boy_medium_skin_tone": "1f468-1f3fd", "biking_man_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2640-fe0f", "keycap_ten": "1f51f", "man_medium_light_skin_tone": "1f468-1f3fc", "couple_with_heart_man_man_light_skin_tone": "1f468-1f3fb", "family_man_man_girl_boy_light_skin_tone": "1f468-1f3fb", "dog2": "1f415", "art": "1f3a8", "taxi": "1f695", "motorcycle": "1f3cd", "diamond_shape_with_a_dot_inside": "1f4a0", "writing_hand_light_skin_tone": "270d-1f3fb", "woman_playing_water_polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2640-fe0f", "martial_arts_uniform": "1f94b", "spiral_calendar": "1f5d3", "older_man_medium_dark_skin_tone": "1f474-1f3fe", 
"woman_artist_medium_skin_tone": "1f469-1f3fd", "no_good_man_medium_dark_skin_tone": "1f645-1f3fe-200d-2642-fe0f", "family_woman_woman_boy_medium_light_skin_tone": "1f469-1f3fc", "ship": "1f6a2", "bangbang": "203c-fe0f", "israel": "1f1ee-1f1f1", "rowing_man_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2640-fe0f", "calling": "1f4f2", "scorpius": "264f-fe0f", "vulcan_salute_dark_skin_tone": "1f596-1f3ff", "woman_office_worker_light_skin_tone": "1f469-1f3fb", "man_judge_light_skin_tone": "1f468-1f3fb", "family_woman_woman_boy_boy_medium_dark_skin_tone": "1f469-1f3fe", "woman_playing_handball": "1f93e-200d-2640-fe0f", "bridge_at_night": "1f309", "stop_sign": "1f6d1", "8ball": "1f3b1", "orange_book": "1f4d9", "couplekiss_man_woman": "1f48f", "no_mobile_phones": "1f4f5", "pouting_man_dark_skin_tone": "1f64e-1f3ff-200d-2642-fe0f", "man_juggling_light_skin_tone": "1f939-1f3fb-200d-2642-fe0f", "cold_sweat": "1f630", "star2": "1f31f", "taco": "1f32e", "point_right_medium_light_skin_tone": "1f449-1f3fc", "selfie_medium_dark_skin_tone": "1f933-1f3fe", "family_woman_woman_girl_boy_light_skin_tone": "1f469-1f3fb", "hankey": "1f4a9", "monkey_face": "1f435", "sweden": "1f1f8-1f1ea", "crocodile": "1f40a", "last_quarter_moon_with_face": "1f31c", "comet": "2604-fe0f", "caribbean_netherlands": "1f1e7-1f1f6", "walking_man_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2640-fe0f", "basketball_man_medium_light_skin_tone": "26f9-1f3fc-200d-2640-fe0f", "deer": "1f98c", "clock4": "1f553", "christmas_island": "1f1e8-1f1fd", "fist_right_medium_skin_tone": "1f91c-1f3fd", "man_cook_dark_skin_tone": "1f468-1f3ff", "family_man_man_girl_medium_light_skin_tone": "1f468-1f3fc", "whale2": "1f40b", "sagittarius": "2650-fe0f", "children_crossing": "1f6b8", "call_me_hand_dark_skin_tone": "1f919-1f3ff", "ok_woman_medium_dark_skin_tone": "1f646-1f3fe-200d-2640-fe0f", "man_firefighter": "1f468-200d-1f692", "rewind": "23ea", "guardswoman_light_skin_tone": "1f482-1f3fb-200d-2640-fe0f", "woman_technologist_light_skin_tone": "1f469-1f3fb", "woman_pilot_light_skin_tone": "1f469-1f3fb", "raising_hand_woman_light_skin_tone": "1f64b-1f3fb-200d-2640-fe0f", "bowing_man_light_skin_tone": "1f647-1f3fb-200d-2640-fe0f", "frowning_man_medium_skin_tone": "1f64d-1f3fd-200d-2642-fe0f", "shark": "1f988", "sun_behind_rain_cloud": "1f326", "dagger": "1f5e1", "musical_note": "1f3b5", "crossed_fingers_dark_skin_tone": "1f91e-1f3ff", "man_pilot_medium_skin_tone": "1f468-1f3fd", "family_woman_boy_medium_skin_tone": "1f469-1f3fd", "golfing_man_medium_light_skin_tone": "1f3cc-1f3fc-200d-2640-fe0f", "girl": "1f467", "family_man_woman_boy_boy": "1f468-200d-1f469-200d-1f466-200d-1f466", "biking_woman": "1f6b4-200d-2640-fe0f", "cl": "1f191", "raised_back_of_hand_medium_dark_skin_tone": "1f91a-1f3fe", "raising_hand_woman_medium_light_skin_tone": "1f64b-1f3fc-200d-2640-fe0f", "baby_medium_skin_tone": "1f476-1f3fd", "guardsman": "1f482", "woman_astronaut": "1f469-200d-1f680", "tophat": "1f3a9", "honduras": "1f1ed-1f1f3", "mexico": "1f1f2-1f1fd", "nauru": "1f1f3-1f1f7", "mrs_claus_medium_light_skin_tone": "1f936-1f3fc", "weary": "1f629", "womans_hat": "1f452", "person_fencing": "1f93a", "u6708": "1f237-fe0f", "a": "1f170-fe0f", "de": "1f1e9-1f1ea", "lebanon": "1f1f1-1f1e7", "puerto_rico": "1f1f5-1f1f7", "man_mechanic_dark_skin_tone": "1f468-1f3ff", "policeman_dark_skin_tone": "1f46e-1f3ff-200d-2640-fe0f", "kissing_smiling_eyes": "1f619", "avocado": "1f951", "six_pointed_star": "1f52f", "record_button": "23fa", "family_woman_woman_girl_girl_light_skin_tone": "1f469-1f3fb", 
"mountain_biking_man_medium_light_skin_tone": "1f6b5-1f3fc-200d-2640-fe0f", "couple_with_heart_man_man_medium_dark_skin_tone": "1f468-1f3fe", "family_man_girl_girl_light_skin_tone": "1f468-1f3fb", "post_office": "1f3e3", "telescope": "1f52d", "baby_symbol": "1f6bc", "capital_abcd": "1f520", "woman_singer_light_skin_tone": "1f469-1f3fb", "woman_facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2640-fe0f", "ant": "1f41c", "house": "1f3e0", "shield": "1f6e1", "yellow_heart": "1f49b", "u55b6": "1f23a", "senegal": "1f1f8-1f1f3", "united_arab_emirates": "1f1e6-1f1ea", "no_good_woman_medium_skin_tone": "1f645-1f3fd-200d-2640-fe0f", "running_man_medium_light_skin_tone": "1f3c3-1f3fc-200d-2640-fe0f", "beetle": "1f41e", "bus": "1f68c", "flight_arrival": "1f6ec", "black_large_square": "2b1b-fe0f", "white_large_square": "2b1c-fe0f", "woman_technologist_medium_light_skin_tone": "1f469-1f3fc", "skier": "26f7", "ok_hand_medium_light_skin_tone": "1f44c-1f3fc", "rofl": "1f923", "hushed": "1f62f", "ng_man": "1f645-200d-2642-fe0f", "running_woman": "1f3c3-200d-2640-fe0f", "family_man_man_girl_girl": "1f468-200d-1f468-200d-1f467-200d-1f467", "gorilla": "1f98d", "horse_racing_medium_light_skin_tone": "1f3c7-1f3fc", "mountain_biking_man_medium_skin_tone": "1f6b5-1f3fd-200d-2640-fe0f", "disappointed": "1f61e", "dolphin": "1f42c", "green_apple": "1f34f", "honey_pot": "1f36f", "georgia": "1f1ec-1f1ea", "business_suit_levitating": "1f574", "camera": "1f4f7", "ledger": "1f4d2", "woman_cook_medium_skin_tone": "1f469-1f3fd", "bahamas": "1f1e7-1f1f8", "family_woman_woman_girl": "1f469-200d-1f469-200d-1f467", "man_factory_worker_light_skin_tone": "1f468-1f3fb", "golfing_woman_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2640-fe0f", "family_woman_girl": "1f469-200d-1f467", "turtle": "1f422", "mauritius": "1f1f2-1f1fa", "family_man_girl_light_skin_tone": "1f468-1f3fb", "hospital": "1f3e5", "church": "26ea-fe0f", "wheel_of_dharma": "2638-fe0f", "mongolia": "1f1f2-1f1f3", "man_facepalming": "1f926-200d-2642-fe0f", "bed": "1f6cf", "man_artist_medium_light_skin_tone": "1f468-1f3fc", "santa_medium_dark_skin_tone": "1f385-1f3fe", "family_man_woman_girl_medium_light_skin_tone": "1f468-1f3fc", "large_blue_diamond": "1f537", "colombia": "1f1e8-1f1f4", "philippines": "1f1f5-1f1ed", "older_woman_dark_skin_tone": "1f475-1f3ff", "woman_scientist_medium_skin_tone": "1f469-1f3fd", "couplekiss_woman_woman_light_skin_tone": "1f469-1f3fb", "male_detective": "1f575-fe0f", "crossed_swords": "2694-fe0f", "notebook": "1f4d3", "nail_care_medium_light_skin_tone": "1f485-1f3fc", "blonde_man_medium_skin_tone": "1f471-1f3fd-200d-2640-fe0f", "tipping_hand_man_light_skin_tone": "1f481-1f3fb-200d-2642-fe0f", "sweat_smile": "1f605", "white_medium_small_square": "25fd-fe0f", "bowtie": "bowtie", "reminder_ribbon": "1f397", "clamp": "1f5dc", "balance_scale": "2696-fe0f", "postal_horn": "1f4ef", "swimming_woman_dark_skin_tone": "1f3ca-1f3ff-200d-2640-fe0f", "raised_hand_with_fingers_splayed": "1f590", "ok_man": "1f646-200d-2642-fe0f", "new": "1f195", "male_detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2640-fe0f", "computer_mouse": "1f5b1", "hourglass_flowing_sand": "23f3", "bahrain": "1f1e7-1f1ed", "djibouti": "1f1e9-1f1ef", "zimbabwe": "1f1ff-1f1fc", "swimming_man_light_skin_tone": "1f3ca-1f3fb-200d-2640-fe0f", "interrobang": "2049-fe0f", "clock8": "1f557", "pancakes": "1f95e", "thermometer": "1f321", "label": "1f3f7", "denmark": "1f1e9-1f1f0", "raising_hand_man_medium_light_skin_tone": "1f64b-1f3fc-200d-2642-fe0f", 
"frowning_man_medium_dark_skin_tone": "1f64d-1f3fe-200d-2642-fe0f", "point_right": "1f449", "guitar": "1f3b8", "family_woman_woman_girl_medium_skin_tone": "1f469-1f3fd", "woman_juggling_medium_skin_tone": "1f939-1f3fd-200d-2640-fe0f", "man_health_worker": "1f468-200d-2695-fe0f", "stew": "1f372", "surfing_man": "1f3c4", "twisted_rightwards_arrows": "1f500", "timor_leste": "1f1f9-1f1f1", "weight_lifting_woman": "1f3cb-fe0f-200d-2640-fe0f", "amphora": "1f3fa", "heart": "2764-fe0f", "bowing_man_medium_dark_skin_tone": "1f647-1f3fe-200d-2640-fe0f"} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go deleted file mode 100644 index 71e2671c..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type EmojiSearch struct { - Term string `json:"term"` - PrefixOnly bool `json:"prefix_only"` -} - -func (es *EmojiSearch) ToJson() string { - b, _ := json.Marshal(es) - return string(b) -} - -func EmojiSearchFromJson(data io.Reader) *EmojiSearch { - var es *EmojiSearch - json.NewDecoder(data).Decode(&es) - return es -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file.go deleted file mode 100644 index 9f76bac1..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/file.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -const ( - MaxImageSize = int64(6048 * 4032) // 24 megapixels, roughly 36MB as a raw image -) - -var ( - IMAGE_EXTENSIONS = [7]string{".jpg", ".jpeg", ".gif", ".bmp", ".png", ".tiff", "tif"} - IMAGE_MIME_TYPES = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tif"} -) - -type FileUploadResponse struct { - FileInfos []*FileInfo `json:"file_infos"` - ClientIds []string `json:"client_ids"` -} - -func FileUploadResponseFromJson(data io.Reader) *FileUploadResponse { - var o *FileUploadResponse - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *FileUploadResponse) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go deleted file mode 100644 index 85c1e9f8..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "net/http" - "time" -) - -const ( - JOB_TYPE_DATA_RETENTION = "data_retention" - JOB_TYPE_MESSAGE_EXPORT = "message_export" - JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing" - JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION = "elasticsearch_post_aggregation" - JOB_TYPE_BLEVE_POST_INDEXING = "bleve_post_indexing" - JOB_TYPE_LDAP_SYNC = "ldap_sync" - JOB_TYPE_MIGRATIONS = "migrations" - JOB_TYPE_PLUGINS = "plugins" - JOB_TYPE_EXPIRY_NOTIFY = "expiry_notify" - - JOB_STATUS_PENDING = "pending" - JOB_STATUS_IN_PROGRESS = "in_progress" - JOB_STATUS_SUCCESS = "success" - JOB_STATUS_ERROR = "error" - JOB_STATUS_CANCEL_REQUESTED = "cancel_requested" - JOB_STATUS_CANCELED = "canceled" - JOB_STATUS_WARNING = "warning" -) - -type Job struct { - Id string `json:"id"` - Type string `json:"type"` - Priority int64 `json:"priority"` - CreateAt int64 `json:"create_at"` - StartAt int64 `json:"start_at"` - LastActivityAt int64 `json:"last_activity_at"` - Status string `json:"status"` - Progress int64 `json:"progress"` - Data map[string]string `json:"data"` -} - -func (j *Job) IsValid() *AppError { - if !IsValidId(j.Id) { - return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest) - } - - if j.CreateAt == 0 { - return NewAppError("Job.IsValid", "model.job.is_valid.create_at.app_error", nil, "id="+j.Id, http.StatusBadRequest) - } - - switch j.Type { - case JOB_TYPE_DATA_RETENTION: - case JOB_TYPE_ELASTICSEARCH_POST_INDEXING: - case JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION: - case JOB_TYPE_BLEVE_POST_INDEXING: - case JOB_TYPE_LDAP_SYNC: - case JOB_TYPE_MESSAGE_EXPORT: - case JOB_TYPE_MIGRATIONS: - case JOB_TYPE_PLUGINS: - case JOB_TYPE_EXPIRY_NOTIFY: - default: - return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest) - } - - switch j.Status { - case JOB_STATUS_PENDING: - case JOB_STATUS_IN_PROGRESS: - case JOB_STATUS_SUCCESS: - case JOB_STATUS_ERROR: - case JOB_STATUS_CANCEL_REQUESTED: - case JOB_STATUS_CANCELED: - default: - return NewAppError("Job.IsValid", "model.job.is_valid.status.app_error", nil, "id="+j.Id, http.StatusBadRequest) - } - - return nil -} - -func (j *Job) ToJson() string { - b, _ := json.Marshal(j) - return string(b) -} - -func JobFromJson(data io.Reader) *Job { - var job Job - if err := json.NewDecoder(data).Decode(&job); err == nil { - return &job - } else { - return nil - } -} - -func JobsToJson(jobs []*Job) string { - b, _ := json.Marshal(jobs) - return string(b) -} - -func JobsFromJson(data io.Reader) []*Job { - var jobs []*Job - if err := json.NewDecoder(data).Decode(&jobs); err == nil { - return jobs - } else { - return nil - } -} - -func (j *Job) DataToJson() string { - b, _ := json.Marshal(j.Data) - return string(b) -} - -type Worker interface { - Run() - Stop() - JobChannel() chan<- Job -} - -type Scheduler interface { - Name() string - JobType() string - Enabled(cfg *Config) bool - NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time - ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go deleted file mode 100644 index 979ff342..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2015-present 
Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type MfaSecret struct { - Secret string `json:"secret"` - QRCode string `json:"qr_code"` -} - -func (me *MfaSecret) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func MfaSecretFromJson(data io.Reader) *MfaSecret { - var me *MfaSecret - json.NewDecoder(data).Decode(&me) - return me -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go deleted file mode 100644 index 7dd08bef..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -const ( - MIGRATION_KEY_ADVANCED_PERMISSIONS_PHASE_2 = "migration_advanced_permissions_phase_2" - - MIGRATION_KEY_EMOJI_PERMISSIONS_SPLIT = "emoji_permissions_split" - MIGRATION_KEY_WEBHOOK_PERMISSIONS_SPLIT = "webhook_permissions_split" - MIGRATION_KEY_LIST_JOIN_PUBLIC_PRIVATE_TEAMS = "list_join_public_private_teams" - MIGRATION_KEY_REMOVE_PERMANENT_DELETE_USER = "remove_permanent_delete_user" - MIGRATION_KEY_ADD_BOT_PERMISSIONS = "add_bot_permissions" - MIGRATION_KEY_APPLY_CHANNEL_MANAGE_DELETE_TO_CHANNEL_USER = "apply_channel_manage_delete_to_channel_user" - MIGRATION_KEY_REMOVE_CHANNEL_MANAGE_DELETE_FROM_TEAM_USER = "remove_channel_manage_delete_from_team_user" - MIGRATION_KEY_VIEW_MEMBERS_NEW_PERMISSION = "view_members_new_permission" - MIGRATION_KEY_ADD_MANAGE_GUESTS_PERMISSIONS = "add_manage_guests_permissions" - MIGRATION_KEY_CHANNEL_MODERATIONS_PERMISSIONS = "channel_moderations_permissions" - MIGRATION_KEY_ADD_USE_GROUP_MENTIONS_PERMISSION = "add_use_group_mentions_permission" -) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go deleted file mode 100644 index cc3c5a70..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
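[Annotation, not part of the changeset] Most of the v5 model files deleted here (emoji_search.go, file.go, job.go, mfa_secret.go) follow the same pattern: per-type ToJson/FromJson wrappers around encoding/json that silently discard errors. With the vendored v5 model removed, call sites would marshal and decode directly. A hedged sketch of the equivalent call-site pattern, reusing the MfaSecret shape from above:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Same shape as the deleted model.MfaSecret.
type MfaSecret struct {
	Secret string `json:"secret"`
	QRCode string `json:"qr_code"`
}

func main() {
	// v5: me.ToJson() discarded the marshal error; calling
	// encoding/json directly surfaces it instead.
	in := MfaSecret{Secret: "s3cret", QRCode: "data:image/png;base64,..."}
	b, err := json.Marshal(&in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// v5: MfaSecretFromJson(r) ignored the decode error and could
	// return nil; explicit decoding keeps the error visible.
	var out MfaSecret
	if err := json.NewDecoder(bytes.NewReader(b)).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}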
- -package model - -const ( - PERMISSION_SCOPE_SYSTEM = "system_scope" - PERMISSION_SCOPE_TEAM = "team_scope" - PERMISSION_SCOPE_CHANNEL = "channel_scope" -) - -type Permission struct { - Id string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Scope string `json:"scope"` -} - -var PERMISSION_INVITE_USER *Permission -var PERMISSION_ADD_USER_TO_TEAM *Permission -var PERMISSION_USE_SLASH_COMMANDS *Permission -var PERMISSION_MANAGE_SLASH_COMMANDS *Permission -var PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS *Permission -var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission -var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission -var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission -var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission -var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission -var PERMISSION_MANAGE_ROLES *Permission -var PERMISSION_MANAGE_TEAM_ROLES *Permission -var PERMISSION_MANAGE_CHANNEL_ROLES *Permission -var PERMISSION_CREATE_DIRECT_CHANNEL *Permission -var PERMISSION_CREATE_GROUP_CHANNEL *Permission -var PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES *Permission -var PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES *Permission -var PERMISSION_LIST_PUBLIC_TEAMS *Permission -var PERMISSION_JOIN_PUBLIC_TEAMS *Permission -var PERMISSION_LIST_PRIVATE_TEAMS *Permission -var PERMISSION_JOIN_PRIVATE_TEAMS *Permission -var PERMISSION_LIST_TEAM_CHANNELS *Permission -var PERMISSION_JOIN_PUBLIC_CHANNELS *Permission -var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission -var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission -var PERMISSION_EDIT_OTHER_USERS *Permission -var PERMISSION_READ_CHANNEL *Permission -var PERMISSION_READ_PUBLIC_CHANNEL *Permission -var PERMISSION_ADD_REACTION *Permission -var PERMISSION_REMOVE_REACTION *Permission -var PERMISSION_REMOVE_OTHERS_REACTIONS *Permission -var PERMISSION_PERMANENT_DELETE_USER *Permission -var PERMISSION_UPLOAD_FILE *Permission -var PERMISSION_GET_PUBLIC_LINK *Permission -var PERMISSION_MANAGE_WEBHOOKS *Permission -var PERMISSION_MANAGE_OTHERS_WEBHOOKS *Permission -var PERMISSION_MANAGE_INCOMING_WEBHOOKS *Permission -var PERMISSION_MANAGE_OUTGOING_WEBHOOKS *Permission -var PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS *Permission -var PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS *Permission -var PERMISSION_MANAGE_OAUTH *Permission -var PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH *Permission -var PERMISSION_MANAGE_EMOJIS *Permission -var PERMISSION_MANAGE_OTHERS_EMOJIS *Permission -var PERMISSION_CREATE_EMOJIS *Permission -var PERMISSION_DELETE_EMOJIS *Permission -var PERMISSION_DELETE_OTHERS_EMOJIS *Permission -var PERMISSION_CREATE_POST *Permission -var PERMISSION_CREATE_POST_PUBLIC *Permission -var PERMISSION_CREATE_POST_EPHEMERAL *Permission -var PERMISSION_EDIT_POST *Permission -var PERMISSION_EDIT_OTHERS_POSTS *Permission -var PERMISSION_DELETE_POST *Permission -var PERMISSION_DELETE_OTHERS_POSTS *Permission -var PERMISSION_REMOVE_USER_FROM_TEAM *Permission -var PERMISSION_CREATE_TEAM *Permission -var PERMISSION_MANAGE_TEAM *Permission -var PERMISSION_IMPORT_TEAM *Permission -var PERMISSION_VIEW_TEAM *Permission -var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission -var PERMISSION_MANAGE_JOBS *Permission -var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission -var PERMISSION_READ_USER_ACCESS_TOKEN *Permission -var PERMISSION_REVOKE_USER_ACCESS_TOKEN *Permission -var PERMISSION_CREATE_BOT *Permission -var PERMISSION_ASSIGN_BOT *Permission -var PERMISSION_READ_BOTS *Permission -var PERMISSION_READ_OTHERS_BOTS *Permission -var 
PERMISSION_MANAGE_BOTS *Permission -var PERMISSION_MANAGE_OTHERS_BOTS *Permission -var PERMISSION_VIEW_MEMBERS *Permission -var PERMISSION_INVITE_GUEST *Permission -var PERMISSION_PROMOTE_GUEST *Permission -var PERMISSION_DEMOTE_TO_GUEST *Permission -var PERMISSION_USE_CHANNEL_MENTIONS *Permission -var PERMISSION_USE_GROUP_MENTIONS *Permission - -// General permission that encompasses all system admin functions -// in the future this could be broken up to allow access to some -// admin functions but not others -var PERMISSION_MANAGE_SYSTEM *Permission - -var ALL_PERMISSIONS []*Permission - -var CHANNEL_MODERATED_PERMISSIONS []string -var CHANNEL_MODERATED_PERMISSIONS_MAP map[string]string - -func initializePermissions() { - PERMISSION_INVITE_USER = &Permission{ - "invite_user", - "authentication.permissions.team_invite_user.name", - "authentication.permissions.team_invite_user.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_ADD_USER_TO_TEAM = &Permission{ - "add_user_to_team", - "authentication.permissions.add_user_to_team.name", - "authentication.permissions.add_user_to_team.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_USE_SLASH_COMMANDS = &Permission{ - "use_slash_commands", - "authentication.permissions.team_use_slash_commands.name", - "authentication.permissions.team_use_slash_commands.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{ - "manage_slash_commands", - "authentication.permissions.manage_slash_commands.name", - "authentication.permissions.manage_slash_commands.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{ - "manage_others_slash_commands", - "authentication.permissions.manage_others_slash_commands.name", - "authentication.permissions.manage_others_slash_commands.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{ - "create_public_channel", - "authentication.permissions.create_public_channel.name", - "authentication.permissions.create_public_channel.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{ - "create_private_channel", - "authentication.permissions.create_private_channel.name", - "authentication.permissions.create_private_channel.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{ - "manage_public_channel_members", - "authentication.permissions.manage_public_channel_members.name", - "authentication.permissions.manage_public_channel_members.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{ - "manage_private_channel_members", - "authentication.permissions.manage_private_channel_members.name", - "authentication.permissions.manage_private_channel_members.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{ - "assign_system_admin_role", - "authentication.permissions.assign_system_admin_role.name", - "authentication.permissions.assign_system_admin_role.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_ROLES = &Permission{ - "manage_roles", - "authentication.permissions.manage_roles.name", - "authentication.permissions.manage_roles.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_TEAM_ROLES = &Permission{ - "manage_team_roles", - "authentication.permissions.manage_team_roles.name", - "authentication.permissions.manage_team_roles.description", - PERMISSION_SCOPE_TEAM, - } - 
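[Annotation, not part of the changeset] The initializer running through this stretch fills every Permission with positional literals, so the field order Id, Name (i18n key), Description (i18n key), Scope is implicit. Purely as an illustration, the same entry in self-documenting keyed form:

package main

import "fmt"

// Mirrors the deleted model.Permission struct.
type Permission struct {
	Id          string
	Name        string
	Description string
	Scope       string
}

func main() {
	// Keyed equivalent of the positional literal used above for
	// PERMISSION_MANAGE_TEAM_ROLES.
	p := &Permission{
		Id:          "manage_team_roles",
		Name:        "authentication.permissions.manage_team_roles.name",
		Description: "authentication.permissions.manage_team_roles.description",
		Scope:       "team_scope", // PERMISSION_SCOPE_TEAM
	}
	fmt.Printf("%s (%s)\n", p.Id, p.Scope)
}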
PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{ - "manage_channel_roles", - "authentication.permissions.manage_channel_roles.name", - "authentication.permissions.manage_channel_roles.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_MANAGE_SYSTEM = &Permission{ - "manage_system", - "authentication.permissions.manage_system.name", - "authentication.permissions.manage_system.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{ - "create_direct_channel", - "authentication.permissions.create_direct_channel.name", - "authentication.permissions.create_direct_channel.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_CREATE_GROUP_CHANNEL = &Permission{ - "create_group_channel", - "authentication.permissions.create_group_channel.name", - "authentication.permissions.create_group_channel.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{ - "manage_public_channel_properties", - "authentication.permissions.manage_public_channel_properties.name", - "authentication.permissions.manage_public_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{ - "manage_private_channel_properties", - "authentication.permissions.manage_private_channel_properties.name", - "authentication.permissions.manage_private_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_LIST_PUBLIC_TEAMS = &Permission{ - "list_public_teams", - "authentication.permissions.list_public_teams.name", - "authentication.permissions.list_public_teams.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_JOIN_PUBLIC_TEAMS = &Permission{ - "join_public_teams", - "authentication.permissions.join_public_teams.name", - "authentication.permissions.join_public_teams.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_LIST_PRIVATE_TEAMS = &Permission{ - "list_private_teams", - "authentication.permissions.list_private_teams.name", - "authentication.permissions.list_private_teams.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_JOIN_PRIVATE_TEAMS = &Permission{ - "join_private_teams", - "authentication.permissions.join_private_teams.name", - "authentication.permissions.join_private_teams.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_LIST_TEAM_CHANNELS = &Permission{ - "list_team_channels", - "authentication.permissions.list_team_channels.name", - "authentication.permissions.list_team_channels.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{ - "join_public_channels", - "authentication.permissions.join_public_channels.name", - "authentication.permissions.join_public_channels.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{ - "delete_public_channel", - "authentication.permissions.delete_public_channel.name", - "authentication.permissions.delete_public_channel.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{ - "delete_private_channel", - "authentication.permissions.delete_private_channel.name", - "authentication.permissions.delete_private_channel.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_EDIT_OTHER_USERS = &Permission{ - "edit_other_users", - "authentication.permissions.edit_other_users.name", - "authentication.permissions.edit_other_users.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_READ_CHANNEL = &Permission{ - "read_channel", - 
"authentication.permissions.read_channel.name", - "authentication.permissions.read_channel.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_READ_PUBLIC_CHANNEL = &Permission{ - "read_public_channel", - "authentication.permissions.read_public_channel.name", - "authentication.permissions.read_public_channel.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_ADD_REACTION = &Permission{ - "add_reaction", - "authentication.permissions.add_reaction.name", - "authentication.permissions.add_reaction.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_REMOVE_REACTION = &Permission{ - "remove_reaction", - "authentication.permissions.remove_reaction.name", - "authentication.permissions.remove_reaction.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_REMOVE_OTHERS_REACTIONS = &Permission{ - "remove_others_reactions", - "authentication.permissions.remove_others_reactions.name", - "authentication.permissions.remove_others_reactions.description", - PERMISSION_SCOPE_CHANNEL, - } - // DEPRECATED - PERMISSION_PERMANENT_DELETE_USER = &Permission{ - "permanent_delete_user", - "authentication.permissions.permanent_delete_user.name", - "authentication.permissions.permanent_delete_user.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_UPLOAD_FILE = &Permission{ - "upload_file", - "authentication.permissions.upload_file.name", - "authentication.permissions.upload_file.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_GET_PUBLIC_LINK = &Permission{ - "get_public_link", - "authentication.permissions.get_public_link.name", - "authentication.permissions.get_public_link.description", - PERMISSION_SCOPE_SYSTEM, - } - // DEPRECATED - PERMISSION_MANAGE_WEBHOOKS = &Permission{ - "manage_webhooks", - "authentication.permissions.manage_webhooks.name", - "authentication.permissions.manage_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - // DEPRECATED - PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{ - "manage_others_webhooks", - "authentication.permissions.manage_others_webhooks.name", - "authentication.permissions.manage_others_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_INCOMING_WEBHOOKS = &Permission{ - "manage_incoming_webhooks", - "authentication.permissions.manage_incoming_webhooks.name", - "authentication.permissions.manage_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_OUTGOING_WEBHOOKS = &Permission{ - "manage_outgoing_webhooks", - "authentication.permissions.manage_outgoing_webhooks.name", - "authentication.permissions.manage_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS = &Permission{ - "manage_others_incoming_webhooks", - "authentication.permissions.manage_others_incoming_webhooks.name", - "authentication.permissions.manage_others_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS = &Permission{ - "manage_others_outgoing_webhooks", - "authentication.permissions.manage_others_outgoing_webhooks.name", - "authentication.permissions.manage_others_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_MANAGE_OAUTH = &Permission{ - "manage_oauth", - "authentication.permissions.manage_oauth.name", - "authentication.permissions.manage_oauth.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = &Permission{ - "manage_system_wide_oauth", - "authentication.permissions.manage_system_wide_oauth.name", - 
"authentication.permissions.manage_system_wide_oauth.description", - PERMISSION_SCOPE_SYSTEM, - } - // DEPRECATED - PERMISSION_MANAGE_EMOJIS = &Permission{ - "manage_emojis", - "authentication.permissions.manage_emojis.name", - "authentication.permissions.manage_emojis.description", - PERMISSION_SCOPE_TEAM, - } - // DEPRECATED - PERMISSION_MANAGE_OTHERS_EMOJIS = &Permission{ - "manage_others_emojis", - "authentication.permissions.manage_others_emojis.name", - "authentication.permissions.manage_others_emojis.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_CREATE_EMOJIS = &Permission{ - "create_emojis", - "authentication.permissions.create_emojis.name", - "authentication.permissions.create_emojis.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_DELETE_EMOJIS = &Permission{ - "delete_emojis", - "authentication.permissions.delete_emojis.name", - "authentication.permissions.delete_emojis.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_DELETE_OTHERS_EMOJIS = &Permission{ - "delete_others_emojis", - "authentication.permissions.delete_others_emojis.name", - "authentication.permissions.delete_others_emojis.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_CREATE_POST = &Permission{ - "create_post", - "authentication.permissions.create_post.name", - "authentication.permissions.create_post.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_CREATE_POST_PUBLIC = &Permission{ - "create_post_public", - "authentication.permissions.create_post_public.name", - "authentication.permissions.create_post_public.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_CREATE_POST_EPHEMERAL = &Permission{ - "create_post_ephemeral", - "authentication.permissions.create_post_ephemeral.name", - "authentication.permissions.create_post_ephemeral.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_EDIT_POST = &Permission{ - "edit_post", - "authentication.permissions.edit_post.name", - "authentication.permissions.edit_post.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_EDIT_OTHERS_POSTS = &Permission{ - "edit_others_posts", - "authentication.permissions.edit_others_posts.name", - "authentication.permissions.edit_others_posts.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_DELETE_POST = &Permission{ - "delete_post", - "authentication.permissions.delete_post.name", - "authentication.permissions.delete_post.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_DELETE_OTHERS_POSTS = &Permission{ - "delete_others_posts", - "authentication.permissions.delete_others_posts.name", - "authentication.permissions.delete_others_posts.description", - PERMISSION_SCOPE_CHANNEL, - } - PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{ - "remove_user_from_team", - "authentication.permissions.remove_user_from_team.name", - "authentication.permissions.remove_user_from_team.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_CREATE_TEAM = &Permission{ - "create_team", - "authentication.permissions.create_team.name", - "authentication.permissions.create_team.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_TEAM = &Permission{ - "manage_team", - "authentication.permissions.manage_team.name", - "authentication.permissions.manage_team.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_IMPORT_TEAM = &Permission{ - "import_team", - "authentication.permissions.import_team.name", - "authentication.permissions.import_team.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_VIEW_TEAM = &Permission{ - "view_team", - 
"authentication.permissions.view_team.name", - "authentication.permissions.view_team.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{ - "list_users_without_team", - "authentication.permissions.list_users_without_team.name", - "authentication.permissions.list_users_without_team.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{ - "create_user_access_token", - "authentication.permissions.create_user_access_token.name", - "authentication.permissions.create_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{ - "read_user_access_token", - "authentication.permissions.read_user_access_token.name", - "authentication.permissions.read_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{ - "revoke_user_access_token", - "authentication.permissions.revoke_user_access_token.name", - "authentication.permissions.revoke_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_CREATE_BOT = &Permission{ - "create_bot", - "authentication.permissions.create_bot.name", - "authentication.permissions.create_bot.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_ASSIGN_BOT = &Permission{ - "assign_bot", - "authentication.permissions.assign_bot.name", - "authentication.permissions.assign_bot.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_READ_BOTS = &Permission{ - "read_bots", - "authentication.permissions.read_bots.name", - "authentication.permissions.read_bots.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_READ_OTHERS_BOTS = &Permission{ - "read_others_bots", - "authentication.permissions.read_others_bots.name", - "authentication.permissions.read_others_bots.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_BOTS = &Permission{ - "manage_bots", - "authentication.permissions.manage_bots.name", - "authentication.permissions.manage_bots.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_OTHERS_BOTS = &Permission{ - "manage_others_bots", - "authentication.permissions.manage_others_bots.name", - "authentication.permissions.manage_others_bots.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_MANAGE_JOBS = &Permission{ - "manage_jobs", - "authentication.permisssions.manage_jobs.name", - "authentication.permisssions.manage_jobs.description", - PERMISSION_SCOPE_SYSTEM, - } - PERMISSION_VIEW_MEMBERS = &Permission{ - "view_members", - "authentication.permisssions.view_members.name", - "authentication.permisssions.view_members.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_INVITE_GUEST = &Permission{ - "invite_guest", - "authentication.permissions.invite_guest.name", - "authentication.permissions.invite_guest.description", - PERMISSION_SCOPE_TEAM, - } - PERMISSION_PROMOTE_GUEST = &Permission{ - "promote_guest", - "authentication.permissions.promote_guest.name", - "authentication.permissions.promote_guest.description", - PERMISSION_SCOPE_SYSTEM, - } - - PERMISSION_DEMOTE_TO_GUEST = &Permission{ - "demote_to_guest", - "authentication.permissions.demote_to_guest.name", - "authentication.permissions.demote_to_guest.description", - PERMISSION_SCOPE_SYSTEM, - } - - PERMISSION_USE_CHANNEL_MENTIONS = &Permission{ - "use_channel_mentions", - "authentication.permissions.use_channel_mentions.name", - "authentication.permissions.use_channel_mentions.description", - PERMISSION_SCOPE_CHANNEL, - } - - PERMISSION_USE_GROUP_MENTIONS = 
&Permission{ - "use_group_mentions", - "authentication.permissions.use_group_mentions.name", - "authentication.permissions.use_group_mentions.description", - PERMISSION_SCOPE_CHANNEL, - } - - ALL_PERMISSIONS = []*Permission{ - PERMISSION_INVITE_USER, - PERMISSION_ADD_USER_TO_TEAM, - PERMISSION_USE_SLASH_COMMANDS, - PERMISSION_MANAGE_SLASH_COMMANDS, - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS, - PERMISSION_CREATE_PUBLIC_CHANNEL, - PERMISSION_CREATE_PRIVATE_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, - PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE, - PERMISSION_MANAGE_ROLES, - PERMISSION_MANAGE_TEAM_ROLES, - PERMISSION_MANAGE_CHANNEL_ROLES, - PERMISSION_CREATE_DIRECT_CHANNEL, - PERMISSION_CREATE_GROUP_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, - PERMISSION_LIST_PUBLIC_TEAMS, - PERMISSION_JOIN_PUBLIC_TEAMS, - PERMISSION_LIST_PRIVATE_TEAMS, - PERMISSION_JOIN_PRIVATE_TEAMS, - PERMISSION_LIST_TEAM_CHANNELS, - PERMISSION_JOIN_PUBLIC_CHANNELS, - PERMISSION_DELETE_PUBLIC_CHANNEL, - PERMISSION_DELETE_PRIVATE_CHANNEL, - PERMISSION_EDIT_OTHER_USERS, - PERMISSION_READ_CHANNEL, - PERMISSION_READ_PUBLIC_CHANNEL, - PERMISSION_ADD_REACTION, - PERMISSION_REMOVE_REACTION, - PERMISSION_REMOVE_OTHERS_REACTIONS, - PERMISSION_PERMANENT_DELETE_USER, - PERMISSION_UPLOAD_FILE, - PERMISSION_GET_PUBLIC_LINK, - PERMISSION_MANAGE_WEBHOOKS, - PERMISSION_MANAGE_OTHERS_WEBHOOKS, - PERMISSION_MANAGE_INCOMING_WEBHOOKS, - PERMISSION_MANAGE_OUTGOING_WEBHOOKS, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS, - PERMISSION_MANAGE_OAUTH, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH, - PERMISSION_MANAGE_EMOJIS, - PERMISSION_MANAGE_OTHERS_EMOJIS, - PERMISSION_CREATE_EMOJIS, - PERMISSION_DELETE_EMOJIS, - PERMISSION_DELETE_OTHERS_EMOJIS, - PERMISSION_CREATE_POST, - PERMISSION_CREATE_POST_PUBLIC, - PERMISSION_CREATE_POST_EPHEMERAL, - PERMISSION_EDIT_POST, - PERMISSION_EDIT_OTHERS_POSTS, - PERMISSION_DELETE_POST, - PERMISSION_DELETE_OTHERS_POSTS, - PERMISSION_REMOVE_USER_FROM_TEAM, - PERMISSION_CREATE_TEAM, - PERMISSION_MANAGE_TEAM, - PERMISSION_IMPORT_TEAM, - PERMISSION_VIEW_TEAM, - PERMISSION_LIST_USERS_WITHOUT_TEAM, - PERMISSION_MANAGE_JOBS, - PERMISSION_CREATE_USER_ACCESS_TOKEN, - PERMISSION_READ_USER_ACCESS_TOKEN, - PERMISSION_REVOKE_USER_ACCESS_TOKEN, - PERMISSION_CREATE_BOT, - PERMISSION_READ_BOTS, - PERMISSION_READ_OTHERS_BOTS, - PERMISSION_MANAGE_BOTS, - PERMISSION_MANAGE_OTHERS_BOTS, - PERMISSION_MANAGE_SYSTEM, - PERMISSION_VIEW_MEMBERS, - PERMISSION_INVITE_GUEST, - PERMISSION_PROMOTE_GUEST, - PERMISSION_DEMOTE_TO_GUEST, - PERMISSION_USE_CHANNEL_MENTIONS, - PERMISSION_USE_GROUP_MENTIONS, - } - - CHANNEL_MODERATED_PERMISSIONS = []string{ - PERMISSION_CREATE_POST.Id, - "create_reactions", - "manage_members", - PERMISSION_USE_CHANNEL_MENTIONS.Id, - } - - CHANNEL_MODERATED_PERMISSIONS_MAP = map[string]string{ - PERMISSION_CREATE_POST.Id: CHANNEL_MODERATED_PERMISSIONS[0], - PERMISSION_ADD_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1], - PERMISSION_REMOVE_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1], - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2], - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2], - PERMISSION_USE_CHANNEL_MENTIONS.Id: CHANNEL_MODERATED_PERMISSIONS[3], - } -} - -func init() { - initializePermissions() -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_event_data.go 
b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_event_data.go deleted file mode 100644 index c704c993..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_event_data.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -// PluginEventData used to notify peers about plugin changes. -type PluginEventData struct { - Id string `json:"id"` -} - -func (p *PluginEventData) ToJson() string { - b, _ := json.Marshal(p) - return string(b) -} - -func PluginEventDataFromJson(data io.Reader) PluginEventData { - var m PluginEventData - json.NewDecoder(data).Decode(&m) - return m -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go b/vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go deleted file mode 100644 index 6ed845b6..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type Preferences []Preference - -func (o *Preferences) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func PreferencesFromJson(data io.Reader) (Preferences, error) { - decoder := json.NewDecoder(data) - var o Preferences - err := decoder.Decode(&o) - if err == nil { - return o, nil - } else { - return nil, err - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go b/vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go deleted file mode 100644 index e6e8059b..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -const ( - PUSH_STATUS = "status" - PUSH_STATUS_OK = "OK" - PUSH_STATUS_FAIL = "FAIL" - PUSH_STATUS_REMOVE = "REMOVE" - PUSH_STATUS_ERROR_MSG = "error" -) - -type PushResponse map[string]string - -func NewOkPushResponse() PushResponse { - m := make(map[string]string) - m[PUSH_STATUS] = PUSH_STATUS_OK - return m -} - -func NewRemovePushResponse() PushResponse { - m := make(map[string]string) - m[PUSH_STATUS] = PUSH_STATUS_REMOVE - return m -} - -func NewErrorPushResponse(message string) PushResponse { - m := make(map[string]string) - m[PUSH_STATUS] = PUSH_STATUS_FAIL - m[PUSH_STATUS_ERROR_MSG] = message - return m -} - -func (me *PushResponse) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func PushResponseFromJson(data io.Reader) PushResponse { - decoder := json.NewDecoder(data) - - var objmap PushResponse - if err := decoder.Decode(&objmap); err != nil { - return make(map[string]string) - } else { - return objmap - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go b/vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go deleted file mode 100644 index 50879c67..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
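[Annotation, not part of the changeset] PushResponseFromJson above decodes into a map and, on failure, hands back an empty map rather than propagating the error, whereas PreferencesFromJson returns the error — the deleted package mixed both styles. A standalone sketch of the swallow-and-default idiom:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decodeOrEmpty mirrors the deleted PushResponseFromJson: any decode
// failure yields a fresh, usable empty map instead of an error.
func decodeOrEmpty(r io.Reader) map[string]string {
	var m map[string]string
	if err := json.NewDecoder(r).Decode(&m); err != nil {
		return make(map[string]string)
	}
	return m
}

func main() {
	fmt.Println(decodeOrEmpty(strings.NewReader(`{"status":"OK"}`))) // map[status:OK]
	fmt.Println(decodeOrEmpty(strings.NewReader(`not json`)))        // map[]
}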
- -package model - -import ( - "encoding/json" - "io" - "net/http" - "regexp" -) - -type Reaction struct { - UserId string `json:"user_id"` - PostId string `json:"post_id"` - EmojiName string `json:"emoji_name"` - CreateAt int64 `json:"create_at"` -} - -func (o *Reaction) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ReactionFromJson(data io.Reader) *Reaction { - var o Reaction - - if err := json.NewDecoder(data).Decode(&o); err != nil { - return nil - } else { - return &o - } -} - -func ReactionsToJson(o []*Reaction) string { - b, _ := json.Marshal(o) - return string(b) -} - -func MapPostIdToReactionsToJson(o map[string][]*Reaction) string { - b, _ := json.Marshal(o) - return string(b) -} - -func MapPostIdToReactionsFromJson(data io.Reader) map[string][]*Reaction { - decoder := json.NewDecoder(data) - - var objmap map[string][]*Reaction - if err := decoder.Decode(&objmap); err != nil { - return make(map[string][]*Reaction) - } else { - return objmap - } -} - -func ReactionsFromJson(data io.Reader) []*Reaction { - var o []*Reaction - - if err := json.NewDecoder(data).Decode(&o); err != nil { - return nil - } else { - return o - } -} - -func (o *Reaction) IsValid() *AppError { - if !IsValidId(o.UserId) { - return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) - } - - if !IsValidId(o.PostId) { - return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest) - } - - validName := regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`) - - if len(o.EmojiName) == 0 || len(o.EmojiName) > EMOJI_NAME_MAX_LENGTH || !validName.MatchString(o.EmojiName) { - return NewAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName, http.StatusBadRequest) - } - - if o.CreateAt == 0 { - return NewAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (o *Reaction) PreSave() { - if o.CreateAt == 0 { - o.CreateAt = GetMillis() - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/role.go b/vendor/github.com/mattermost/mattermost-server/v5/model/role.go deleted file mode 100644 index 837db366..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/role.go +++ /dev/null @@ -1,649 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
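[Annotation, not part of the changeset] Reaction.IsValid above gates emoji names with the pattern ^[a-zA-Z0-9\-\+_]+$ plus a length cap; EMOJI_NAME_MAX_LENGTH is defined elsewhere in the deleted package and is assumed to be 64 here. A self-contained sketch of that check:

package main

import (
	"fmt"
	"regexp"
)

// Assumed value of the deleted EMOJI_NAME_MAX_LENGTH constant.
const emojiNameMaxLength = 64

// Same pattern as Reaction.IsValid above: ASCII letters, digits,
// hyphen, plus, and underscore only.
var validEmojiName = regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`)

func isValidEmojiName(name string) bool {
	return name != "" && len(name) <= emojiNameMaxLength &&
		validEmojiName.MatchString(name)
}

func main() {
	fmt.Println(isValidEmojiName("+1"))        // true
	fmt.Println(isValidEmojiName("bad name!")) // false
}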
- -package model - -import ( - "encoding/json" - "io" - "strings" -) - -var BuiltInSchemeManagedRoleIDs []string - -func init() { - BuiltInSchemeManagedRoleIDs = []string{ - SYSTEM_GUEST_ROLE_ID, - SYSTEM_USER_ROLE_ID, - SYSTEM_ADMIN_ROLE_ID, - SYSTEM_POST_ALL_ROLE_ID, - SYSTEM_POST_ALL_PUBLIC_ROLE_ID, - SYSTEM_USER_ACCESS_TOKEN_ROLE_ID, - - TEAM_GUEST_ROLE_ID, - TEAM_USER_ROLE_ID, - TEAM_ADMIN_ROLE_ID, - TEAM_POST_ALL_ROLE_ID, - TEAM_POST_ALL_PUBLIC_ROLE_ID, - - CHANNEL_GUEST_ROLE_ID, - CHANNEL_USER_ROLE_ID, - CHANNEL_ADMIN_ROLE_ID, - } -} - -type RoleType string -type RoleScope string - -const ( - SYSTEM_GUEST_ROLE_ID = "system_guest" - SYSTEM_USER_ROLE_ID = "system_user" - SYSTEM_ADMIN_ROLE_ID = "system_admin" - SYSTEM_POST_ALL_ROLE_ID = "system_post_all" - SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public" - SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token" - - TEAM_GUEST_ROLE_ID = "team_guest" - TEAM_USER_ROLE_ID = "team_user" - TEAM_ADMIN_ROLE_ID = "team_admin" - TEAM_POST_ALL_ROLE_ID = "team_post_all" - TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public" - - CHANNEL_GUEST_ROLE_ID = "channel_guest" - CHANNEL_USER_ROLE_ID = "channel_user" - CHANNEL_ADMIN_ROLE_ID = "channel_admin" - - ROLE_NAME_MAX_LENGTH = 64 - ROLE_DISPLAY_NAME_MAX_LENGTH = 128 - ROLE_DESCRIPTION_MAX_LENGTH = 1024 - - RoleScopeSystem RoleScope = "System" - RoleScopeTeam RoleScope = "Team" - RoleScopeChannel RoleScope = "Channel" - - RoleTypeGuest RoleType = "Guest" - RoleTypeUser RoleType = "User" - RoleTypeAdmin RoleType = "Admin" -) - -type Role struct { - Id string `json:"id"` - Name string `json:"name"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - Permissions []string `json:"permissions"` - SchemeManaged bool `json:"scheme_managed"` - BuiltIn bool `json:"built_in"` -} - -type RolePatch struct { - Permissions *[]string `json:"permissions"` -} - -type RolePermissions struct { - RoleID string - Permissions []string -} - -func (r *Role) ToJson() string { - b, _ := json.Marshal(r) - return string(b) -} - -func RoleFromJson(data io.Reader) *Role { - var r *Role - json.NewDecoder(data).Decode(&r) - return r -} - -func RoleListToJson(r []*Role) string { - b, _ := json.Marshal(r) - return string(b) -} - -func RoleListFromJson(data io.Reader) []*Role { - var roles []*Role - json.NewDecoder(data).Decode(&roles) - return roles -} - -func (r *RolePatch) ToJson() string { - b, _ := json.Marshal(r) - return string(b) -} - -func RolePatchFromJson(data io.Reader) *RolePatch { - var rolePatch *RolePatch - json.NewDecoder(data).Decode(&rolePatch) - return rolePatch -} - -func (r *Role) Patch(patch *RolePatch) { - if patch.Permissions != nil { - r.Permissions = *patch.Permissions - } -} - -// MergeChannelHigherScopedPermissions is meant to be invoked on a channel scheme's role and merges the higher-scoped -// channel role's permissions. 
-func (r *Role) MergeChannelHigherScopedPermissions(higherScopedPermissions *RolePermissions) { - mergedPermissions := []string{} - - higherScopedPermissionsMap := AsStringBoolMap(higherScopedPermissions.Permissions) - rolePermissionsMap := AsStringBoolMap(r.Permissions) - - for _, cp := range ALL_PERMISSIONS { - if cp.Scope != PERMISSION_SCOPE_CHANNEL { - continue - } - - _, presentOnHigherScope := higherScopedPermissionsMap[cp.Id] - - // For the channel admin role always look to the higher scope to determine if the role has ther permission. - // The channel admin is a special case because they're not part of the UI to be "channel moderated", only - // channel members and channel guests are. - if higherScopedPermissions.RoleID == CHANNEL_ADMIN_ROLE_ID && presentOnHigherScope { - mergedPermissions = append(mergedPermissions, cp.Id) - continue - } - - _, permissionIsModerated := CHANNEL_MODERATED_PERMISSIONS_MAP[cp.Id] - if permissionIsModerated { - _, presentOnRole := rolePermissionsMap[cp.Id] - if presentOnRole && presentOnHigherScope { - mergedPermissions = append(mergedPermissions, cp.Id) - } - } else { - if presentOnHigherScope { - mergedPermissions = append(mergedPermissions, cp.Id) - } - } - } - - r.Permissions = mergedPermissions -} - -// Returns an array of permissions that are in either role.Permissions -// or patch.Permissions, but not both. -func PermissionsChangedByPatch(role *Role, patch *RolePatch) []string { - var result []string - - if patch.Permissions == nil { - return result - } - - roleMap := make(map[string]bool) - patchMap := make(map[string]bool) - - for _, permission := range role.Permissions { - roleMap[permission] = true - } - - for _, permission := range *patch.Permissions { - patchMap[permission] = true - } - - for _, permission := range role.Permissions { - if !patchMap[permission] { - result = append(result, permission) - } - } - - for _, permission := range *patch.Permissions { - if !roleMap[permission] { - result = append(result, permission) - } - } - - return result -} - -func ChannelModeratedPermissionsChangedByPatch(role *Role, patch *RolePatch) []string { - var result []string - - if role == nil { - return result - } - - if patch.Permissions == nil { - return result - } - - roleMap := make(map[string]bool) - patchMap := make(map[string]bool) - - for _, permission := range role.Permissions { - if channelModeratedPermissionName, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; found { - roleMap[channelModeratedPermissionName] = true - } - } - - for _, permission := range *patch.Permissions { - if channelModeratedPermissionName, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; found { - patchMap[channelModeratedPermissionName] = true - } - } - - for permissionKey := range roleMap { - if !patchMap[permissionKey] { - result = append(result, permissionKey) - } - } - - for permissionKey := range patchMap { - if !roleMap[permissionKey] { - result = append(result, permissionKey) - } - } - - return result -} - -// GetChannelModeratedPermissions returns a map of channel moderated permissions that the role has access to -func (r *Role) GetChannelModeratedPermissions(channelType string) map[string]bool { - moderatedPermissions := make(map[string]bool) - for _, permission := range r.Permissions { - if _, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; !found { - continue - } - - for moderated, moderatedPermissionValue := range CHANNEL_MODERATED_PERMISSIONS_MAP { - // the moderated permission has already been found to be true so skip this 
iteration - if moderatedPermissions[moderatedPermissionValue] { - continue - } - - if moderated == permission { - // Special case where the channel moderated permission for `manage_members` is different depending on whether the channel is private or public - if moderated == PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id || moderated == PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id { - canManagePublic := channelType == CHANNEL_OPEN && moderated == PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id - canManagePrivate := channelType == CHANNEL_PRIVATE && moderated == PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id - moderatedPermissions[moderatedPermissionValue] = canManagePublic || canManagePrivate - } else { - moderatedPermissions[moderatedPermissionValue] = true - } - } - } - } - - return moderatedPermissions -} - -// RolePatchFromChannelModerationsPatch Creates and returns a RolePatch based on a slice of ChannelModerationPatchs, roleName is expected to be either "members" or "guests". -func (r *Role) RolePatchFromChannelModerationsPatch(channelModerationsPatch []*ChannelModerationPatch, roleName string) *RolePatch { - permissionsToAddToPatch := make(map[string]bool) - - // Iterate through the list of existing permissions on the role and append permissions that we want to keep. - for _, permission := range r.Permissions { - // Permission is not moderated so dont add it to the patch and skip the channelModerationsPatch - if _, isModerated := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; !isModerated { - continue - } - - permissionEnabled := true - // Check if permission has a matching moderated permission name inside the channel moderation patch - for _, channelModerationPatch := range channelModerationsPatch { - if *channelModerationPatch.Name == CHANNEL_MODERATED_PERMISSIONS_MAP[permission] { - // Permission key exists in patch with a value of false so skip over it - if roleName == "members" { - if channelModerationPatch.Roles.Members != nil && !*channelModerationPatch.Roles.Members { - permissionEnabled = false - } - } else if roleName == "guests" { - if channelModerationPatch.Roles.Guests != nil && !*channelModerationPatch.Roles.Guests { - permissionEnabled = false - } - } - } - } - - if permissionEnabled { - permissionsToAddToPatch[permission] = true - } - } - - // Iterate through the patch and add any permissions that dont already exist on the role - for _, channelModerationPatch := range channelModerationsPatch { - for permission, moderatedPermissionName := range CHANNEL_MODERATED_PERMISSIONS_MAP { - if roleName == "members" && channelModerationPatch.Roles.Members != nil && *channelModerationPatch.Roles.Members && *channelModerationPatch.Name == moderatedPermissionName { - permissionsToAddToPatch[permission] = true - } - - if roleName == "guests" && channelModerationPatch.Roles.Guests != nil && *channelModerationPatch.Roles.Guests && *channelModerationPatch.Name == moderatedPermissionName { - permissionsToAddToPatch[permission] = true - } - } - } - - patchPermissions := make([]string, 0, len(permissionsToAddToPatch)) - for permission := range permissionsToAddToPatch { - patchPermissions = append(patchPermissions, permission) - } - - return &RolePatch{Permissions: &patchPermissions} -} - -func (r *Role) IsValid() bool { - if !IsValidId(r.Id) { - return false - } - - return r.IsValidWithoutId() -} - -func (r *Role) IsValidWithoutId() bool { - if !IsValidRoleName(r.Name) { - return false - } - - if len(r.DisplayName) == 0 || len(r.DisplayName) > ROLE_DISPLAY_NAME_MAX_LENGTH { - return false - } - - 
if len(r.Description) > ROLE_DESCRIPTION_MAX_LENGTH { - return false - } - - for _, permission := range r.Permissions { - permissionValidated := false - for _, p := range ALL_PERMISSIONS { - if permission == p.Id { - permissionValidated = true - break - } - } - - if !permissionValidated { - return false - } - } - - return true -} - -func CleanRoleNames(roleNames []string) ([]string, bool) { - var cleanedRoleNames []string - for _, roleName := range roleNames { - if strings.TrimSpace(roleName) == "" { - continue - } - - if !IsValidRoleName(roleName) { - return roleNames, false - } - - cleanedRoleNames = append(cleanedRoleNames, roleName) - } - - return cleanedRoleNames, true -} - -func IsValidRoleName(roleName string) bool { - if len(roleName) <= 0 || len(roleName) > ROLE_NAME_MAX_LENGTH { - return false - } - - if strings.TrimLeft(roleName, "abcdefghijklmnopqrstuvwxyz0123456789_") != "" { - return false - } - - return true -} - -func MakeDefaultRoles() map[string]*Role { - roles := make(map[string]*Role) - - roles[CHANNEL_GUEST_ROLE_ID] = &Role{ - Name: "channel_guest", - DisplayName: "authentication.roles.channel_guest.name", - Description: "authentication.roles.channel_guest.description", - Permissions: []string{ - PERMISSION_READ_CHANNEL.Id, - PERMISSION_ADD_REACTION.Id, - PERMISSION_REMOVE_REACTION.Id, - PERMISSION_UPLOAD_FILE.Id, - PERMISSION_EDIT_POST.Id, - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - PERMISSION_USE_SLASH_COMMANDS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[CHANNEL_USER_ROLE_ID] = &Role{ - Name: "channel_user", - DisplayName: "authentication.roles.channel_user.name", - Description: "authentication.roles.channel_user.description", - Permissions: []string{ - PERMISSION_READ_CHANNEL.Id, - PERMISSION_ADD_REACTION.Id, - PERMISSION_REMOVE_REACTION.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, - PERMISSION_UPLOAD_FILE.Id, - PERMISSION_GET_PUBLIC_LINK.Id, - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - PERMISSION_USE_SLASH_COMMANDS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[CHANNEL_ADMIN_ROLE_ID] = &Role{ - Name: "channel_admin", - DisplayName: "authentication.roles.channel_admin.name", - Description: "authentication.roles.channel_admin.description", - Permissions: []string{ - PERMISSION_MANAGE_CHANNEL_ROLES.Id, - PERMISSION_USE_GROUP_MENTIONS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_GUEST_ROLE_ID] = &Role{ - Name: "team_guest", - DisplayName: "authentication.roles.team_guest.name", - Description: "authentication.roles.team_guest.description", - Permissions: []string{ - PERMISSION_VIEW_TEAM.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_USER_ROLE_ID] = &Role{ - Name: "team_user", - DisplayName: "authentication.roles.team_user.name", - Description: "authentication.roles.team_user.description", - Permissions: []string{ - PERMISSION_LIST_TEAM_CHANNELS.Id, - PERMISSION_JOIN_PUBLIC_CHANNELS.Id, - PERMISSION_READ_PUBLIC_CHANNEL.Id, - PERMISSION_VIEW_TEAM.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_POST_ALL_ROLE_ID] = &Role{ - Name: "team_post_all", - DisplayName: "authentication.roles.team_post_all.name", - Description: "authentication.roles.team_post_all.description", - Permissions: []string{ - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ - Name: "team_post_all_public", - DisplayName: 
"authentication.roles.team_post_all_public.name", - Description: "authentication.roles.team_post_all_public.description", - Permissions: []string{ - PERMISSION_CREATE_POST_PUBLIC.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[TEAM_ADMIN_ROLE_ID] = &Role{ - Name: "team_admin", - DisplayName: "authentication.roles.team_admin.name", - Description: "authentication.roles.team_admin.description", - Permissions: []string{ - PERMISSION_REMOVE_USER_FROM_TEAM.Id, - PERMISSION_MANAGE_TEAM.Id, - PERMISSION_IMPORT_TEAM.Id, - PERMISSION_MANAGE_TEAM_ROLES.Id, - PERMISSION_MANAGE_CHANNEL_ROLES.Id, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, - PERMISSION_MANAGE_SLASH_COMMANDS.Id, - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id, - PERMISSION_MANAGE_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OUTGOING_WEBHOOKS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_GUEST_ROLE_ID] = &Role{ - Name: "system_guest", - DisplayName: "authentication.roles.global_guest.name", - Description: "authentication.roles.global_guest.description", - Permissions: []string{ - PERMISSION_CREATE_DIRECT_CHANNEL.Id, - PERMISSION_CREATE_GROUP_CHANNEL.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_USER_ROLE_ID] = &Role{ - Name: "system_user", - DisplayName: "authentication.roles.global_user.name", - Description: "authentication.roles.global_user.description", - Permissions: []string{ - PERMISSION_LIST_PUBLIC_TEAMS.Id, - PERMISSION_JOIN_PUBLIC_TEAMS.Id, - PERMISSION_CREATE_DIRECT_CHANNEL.Id, - PERMISSION_CREATE_GROUP_CHANNEL.Id, - PERMISSION_VIEW_MEMBERS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_POST_ALL_ROLE_ID] = &Role{ - Name: "system_post_all", - DisplayName: "authentication.roles.system_post_all.name", - Description: "authentication.roles.system_post_all.description", - Permissions: []string{ - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ - Name: "system_post_all_public", - DisplayName: "authentication.roles.system_post_all_public.name", - Description: "authentication.roles.system_post_all_public.description", - Permissions: []string{ - PERMISSION_CREATE_POST_PUBLIC.Id, - PERMISSION_USE_CHANNEL_MENTIONS.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{ - Name: "system_user_access_token", - DisplayName: "authentication.roles.system_user_access_token.name", - Description: "authentication.roles.system_user_access_token.description", - Permissions: []string{ - PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, - PERMISSION_READ_USER_ACCESS_TOKEN.Id, - PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_ADMIN_ROLE_ID] = &Role{ - Name: "system_admin", - DisplayName: "authentication.roles.global_admin.name", - Description: "authentication.roles.global_admin.description", - // System admins can do anything channel and team admins can do - // plus everything members of teams and channels can do to all teams - // and channels on the system - Permissions: append( - append( - append( - append( - []string{ - PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id, - PERMISSION_MANAGE_SYSTEM.Id, - PERMISSION_MANAGE_ROLES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id, - 
PERMISSION_DELETE_PUBLIC_CHANNEL.Id, - PERMISSION_CREATE_PUBLIC_CHANNEL.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id, - PERMISSION_DELETE_PRIVATE_CHANNEL.Id, - PERMISSION_CREATE_PRIVATE_CHANNEL.Id, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, - PERMISSION_EDIT_OTHER_USERS.Id, - PERMISSION_EDIT_OTHERS_POSTS.Id, - PERMISSION_MANAGE_OAUTH.Id, - PERMISSION_INVITE_USER.Id, - PERMISSION_INVITE_GUEST.Id, - PERMISSION_PROMOTE_GUEST.Id, - PERMISSION_DEMOTE_TO_GUEST.Id, - PERMISSION_DELETE_POST.Id, - PERMISSION_DELETE_OTHERS_POSTS.Id, - PERMISSION_CREATE_TEAM.Id, - PERMISSION_ADD_USER_TO_TEAM.Id, - PERMISSION_LIST_USERS_WITHOUT_TEAM.Id, - PERMISSION_MANAGE_JOBS.Id, - PERMISSION_CREATE_POST_PUBLIC.Id, - PERMISSION_CREATE_POST_EPHEMERAL.Id, - PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, - PERMISSION_READ_USER_ACCESS_TOKEN.Id, - PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, - PERMISSION_CREATE_BOT.Id, - PERMISSION_READ_BOTS.Id, - PERMISSION_READ_OTHERS_BOTS.Id, - PERMISSION_MANAGE_BOTS.Id, - PERMISSION_MANAGE_OTHERS_BOTS.Id, - PERMISSION_REMOVE_OTHERS_REACTIONS.Id, - PERMISSION_LIST_PRIVATE_TEAMS.Id, - PERMISSION_JOIN_PRIVATE_TEAMS.Id, - PERMISSION_VIEW_MEMBERS.Id, - }, - roles[TEAM_USER_ROLE_ID].Permissions..., - ), - roles[CHANNEL_USER_ROLE_ID].Permissions..., - ), - roles[TEAM_ADMIN_ROLE_ID].Permissions..., - ), - roles[CHANNEL_ADMIN_ROLE_ID].Permissions..., - ), - SchemeManaged: true, - BuiltIn: true, - } - - return roles -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go b/vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go deleted file mode 100644 index 657cc749..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
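PermissionsChangedByPatch in the role.go deleted above is a symmetric difference over two permission lists: everything present in exactly one of role.Permissions and patch.Permissions. Its map-based set walk reduces to a few lines; a self-contained sketch with hypothetical permission IDs:

    package main

    import "fmt"

    // symmetricDiff returns the strings present in exactly one of a and b,
    // the core of the deleted PermissionsChangedByPatch.
    func symmetricDiff(a, b []string) []string {
    	inA := make(map[string]bool, len(a))
    	inB := make(map[string]bool, len(b))
    	for _, s := range a {
    		inA[s] = true
    	}
    	for _, s := range b {
    		inB[s] = true
    	}
    	var out []string
    	for _, s := range a {
    		if !inB[s] {
    			out = append(out, s)
    		}
    	}
    	for _, s := range b {
    		if !inA[s] {
    			out = append(out, s)
    		}
    	}
    	return out
    }

    func main() {
    	role := []string{"create_post", "edit_post"}
    	patch := []string{"create_post", "add_reaction"}
    	fmt.Println(symmetricDiff(role, patch)) // [edit_post add_reaction]
    }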
- -package model - -import ( - "fmt" - "time" -) - -type TaskFunc func() - -type ScheduledTask struct { - Name string `json:"name"` - Interval time.Duration `json:"interval"` - Recurring bool `json:"recurring"` - function func() - cancel chan struct{} - cancelled chan struct{} -} - -func CreateTask(name string, function TaskFunc, timeToExecution time.Duration) *ScheduledTask { - return createTask(name, function, timeToExecution, false) -} - -func CreateRecurringTask(name string, function TaskFunc, interval time.Duration) *ScheduledTask { - return createTask(name, function, interval, true) -} - -func createTask(name string, function TaskFunc, interval time.Duration, recurring bool) *ScheduledTask { - task := &ScheduledTask{ - Name: name, - Interval: interval, - Recurring: recurring, - function: function, - cancel: make(chan struct{}), - cancelled: make(chan struct{}), - } - - go func() { - defer close(task.cancelled) - - ticker := time.NewTicker(interval) - defer func() { - ticker.Stop() - }() - - for { - select { - case <-ticker.C: - function() - case <-task.cancel: - return - } - - if !task.Recurring { - break - } - } - }() - - return task -} - -func (task *ScheduledTask) Cancel() { - close(task.cancel) - <-task.cancelled -} - -func (task *ScheduledTask) String() string { - return fmt.Sprintf( - "%s\nInterval: %s\nRecurring: %t\n", - task.Name, - task.Interval.String(), - task.Recurring, - ) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go b/vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go deleted file mode 100644 index 630f14a6..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "fmt" - "io" - "regexp" -) - -const ( - SCHEME_DISPLAY_NAME_MAX_LENGTH = 128 - SCHEME_NAME_MAX_LENGTH = 64 - SCHEME_DESCRIPTION_MAX_LENGTH = 1024 - SCHEME_SCOPE_TEAM = "team" - SCHEME_SCOPE_CHANNEL = "channel" -) - -type Scheme struct { - Id string `json:"id"` - Name string `json:"name"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - Scope string `json:"scope"` - DefaultTeamAdminRole string `json:"default_team_admin_role"` - DefaultTeamUserRole string `json:"default_team_user_role"` - DefaultChannelAdminRole string `json:"default_channel_admin_role"` - DefaultChannelUserRole string `json:"default_channel_user_role"` - DefaultTeamGuestRole string `json:"default_team_guest_role"` - DefaultChannelGuestRole string `json:"default_channel_guest_role"` -} - -type SchemePatch struct { - Name *string `json:"name"` - DisplayName *string `json:"display_name"` - Description *string `json:"description"` -} - -type SchemeIDPatch struct { - SchemeID *string `json:"scheme_id"` -} - -// SchemeConveyor is used for importing and exporting a Scheme and its associated Roles. 
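The ScheduledTask deleted just above pairs two channels: cancel is closed by Cancel() to request shutdown, and cancelled is closed by the worker goroutine on exit, so Cancel can block until the tick loop has actually stopped. A condensed, runnable form of that pattern (a sketch of the idiom, not the original API):

    package main

    import (
    	"fmt"
    	"time"
    )

    // recurringTask condenses the deleted createTask/Cancel machinery:
    // cancel requests shutdown; cancelled signals that the worker exited.
    type recurringTask struct {
    	cancel    chan struct{}
    	cancelled chan struct{}
    }

    func startRecurring(interval time.Duration, fn func()) *recurringTask {
    	t := &recurringTask{
    		cancel:    make(chan struct{}),
    		cancelled: make(chan struct{}),
    	}
    	go func() {
    		defer close(t.cancelled)
    		ticker := time.NewTicker(interval)
    		defer ticker.Stop()
    		for {
    			select {
    			case <-ticker.C:
    				fn()
    			case <-t.cancel:
    				return
    			}
    		}
    	}()
    	return t
    }

    func (t *recurringTask) Cancel() {
    	close(t.cancel)
    	<-t.cancelled // rendezvous with the worker goroutine
    }

    func main() {
    	t := startRecurring(10*time.Millisecond, func() { fmt.Print("tick ") })
    	time.Sleep(35 * time.Millisecond)
    	t.Cancel()
    	fmt.Println("stopped")
    }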
-type SchemeConveyor struct { - Name string `json:"name"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - Scope string `json:"scope"` - TeamAdmin string `json:"default_team_admin_role"` - TeamUser string `json:"default_team_user_role"` - TeamGuest string `json:"default_team_guest_role"` - ChannelAdmin string `json:"default_channel_admin_role"` - ChannelUser string `json:"default_channel_user_role"` - ChannelGuest string `json:"default_channel_guest_role"` - Roles []*Role `json:"roles"` -} - -func (sc *SchemeConveyor) Scheme() *Scheme { - return &Scheme{ - DisplayName: sc.DisplayName, - Name: sc.Name, - Description: sc.Description, - Scope: sc.Scope, - DefaultTeamAdminRole: sc.TeamAdmin, - DefaultTeamUserRole: sc.TeamUser, - DefaultTeamGuestRole: sc.TeamGuest, - DefaultChannelAdminRole: sc.ChannelAdmin, - DefaultChannelUserRole: sc.ChannelUser, - DefaultChannelGuestRole: sc.ChannelGuest, - } -} - -type SchemeRoles struct { - SchemeAdmin bool `json:"scheme_admin"` - SchemeUser bool `json:"scheme_user"` - SchemeGuest bool `json:"scheme_guest"` -} - -func (scheme *Scheme) ToJson() string { - b, _ := json.Marshal(scheme) - return string(b) -} - -func SchemeFromJson(data io.Reader) *Scheme { - var scheme *Scheme - json.NewDecoder(data).Decode(&scheme) - return scheme -} - -func SchemesToJson(schemes []*Scheme) string { - b, _ := json.Marshal(schemes) - return string(b) -} - -func SchemesFromJson(data io.Reader) []*Scheme { - var schemes []*Scheme - if err := json.NewDecoder(data).Decode(&schemes); err == nil { - return schemes - } else { - return nil - } -} - -func (scheme *Scheme) IsValid() bool { - if !IsValidId(scheme.Id) { - return false - } - - return scheme.IsValidForCreate() -} - -func (scheme *Scheme) IsValidForCreate() bool { - if len(scheme.DisplayName) == 0 || len(scheme.DisplayName) > SCHEME_DISPLAY_NAME_MAX_LENGTH { - return false - } - - if !IsValidSchemeName(scheme.Name) { - return false - } - - if len(scheme.Description) > SCHEME_DESCRIPTION_MAX_LENGTH { - return false - } - - switch scheme.Scope { - case SCHEME_SCOPE_TEAM, SCHEME_SCOPE_CHANNEL: - default: - return false - } - - if !IsValidRoleName(scheme.DefaultChannelAdminRole) { - return false - } - - if !IsValidRoleName(scheme.DefaultChannelUserRole) { - return false - } - - if !IsValidRoleName(scheme.DefaultChannelGuestRole) { - return false - } - - if scheme.Scope == SCHEME_SCOPE_TEAM { - if !IsValidRoleName(scheme.DefaultTeamAdminRole) { - return false - } - - if !IsValidRoleName(scheme.DefaultTeamUserRole) { - return false - } - - if !IsValidRoleName(scheme.DefaultTeamGuestRole) { - return false - } - } - - if scheme.Scope == SCHEME_SCOPE_CHANNEL { - if len(scheme.DefaultTeamAdminRole) != 0 { - return false - } - - if len(scheme.DefaultTeamUserRole) != 0 { - return false - } - - if len(scheme.DefaultTeamGuestRole) != 0 { - return false - } - } - - return true -} - -func (scheme *Scheme) Patch(patch *SchemePatch) { - if patch.DisplayName != nil { - scheme.DisplayName = *patch.DisplayName - } - if patch.Name != nil { - scheme.Name = *patch.Name - } - if patch.Description != nil { - scheme.Description = *patch.Description - } -} - -func (patch *SchemePatch) ToJson() string { - b, _ := json.Marshal(patch) - return string(b) -} - -func SchemePatchFromJson(data io.Reader) *SchemePatch { - var patch *SchemePatch - json.NewDecoder(data).Decode(&patch) - return patch -} - -func SchemeIDFromJson(data io.Reader) *string { - var p *SchemeIDPatch - json.NewDecoder(data).Decode(&p) - return 
p.SchemeID -} - -func (p *SchemeIDPatch) ToJson() string { - b, _ := json.Marshal(p) - return string(b) -} - -func IsValidSchemeName(name string) bool { - re := regexp.MustCompile(fmt.Sprintf("^[a-z0-9_]{2,%d}$", SCHEME_NAME_MAX_LENGTH)) - return re.MatchString(name) -} - -func (schemeRoles *SchemeRoles) ToJson() string { - b, _ := json.Marshal(schemeRoles) - return string(b) -} - -func SchemeRolesFromJson(data io.Reader) *SchemeRoles { - var schemeRoles *SchemeRoles - json.NewDecoder(data).Decode(&schemeRoles) - return schemeRoles -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go b/vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go deleted file mode 100644 index ae66cf30..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type SecurityBulletin struct { - Id string `json:"id"` - AppliesToVersion string `json:"applies_to_version"` -} - -type SecurityBulletins []SecurityBulletin - -func (me *SecurityBulletin) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func SecurityBulletinFromJson(data io.Reader) *SecurityBulletin { - var o *SecurityBulletin - json.NewDecoder(data).Decode(&o) - return o -} - -func (me SecurityBulletins) ToJson() string { - if b, err := json.Marshal(me); err != nil { - return "[]" - } else { - return string(b) - } -} - -func SecurityBulletinsFromJson(data io.Reader) SecurityBulletins { - var o SecurityBulletins - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go deleted file mode 100644 index f3b4ae33..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" - "strconv" - "strings" - - "github.com/mattermost/mattermost-server/v5/mlog" -) - -const ( - SESSION_COOKIE_TOKEN = "MMAUTHTOKEN" - SESSION_COOKIE_USER = "MMUSERID" - SESSION_COOKIE_CSRF = "MMCSRF" - SESSION_CACHE_SIZE = 35000 - SESSION_PROP_PLATFORM = "platform" - SESSION_PROP_OS = "os" - SESSION_PROP_BROWSER = "browser" - SESSION_PROP_TYPE = "type" - SESSION_PROP_USER_ACCESS_TOKEN_ID = "user_access_token_id" - SESSION_PROP_IS_BOT = "is_bot" - SESSION_PROP_IS_BOT_VALUE = "true" - SESSION_TYPE_USER_ACCESS_TOKEN = "UserAccessToken" - SESSION_PROP_IS_GUEST = "is_guest" - SESSION_ACTIVITY_TIMEOUT = 1000 * 60 * 5 // 5 minutes - SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years -) - -type Session struct { - Id string `json:"id"` - Token string `json:"token"` - CreateAt int64 `json:"create_at"` - ExpiresAt int64 `json:"expires_at"` - LastActivityAt int64 `json:"last_activity_at"` - UserId string `json:"user_id"` - DeviceId string `json:"device_id"` - Roles string `json:"roles"` - IsOAuth bool `json:"is_oauth"` - ExpiredNotify bool `json:"expired_notify"` - Props StringMap `json:"props"` - TeamMembers []*TeamMember `json:"team_members" db:"-"` - Local bool `json:"local" db:"-"` -} - -// Returns true if the session is unrestricted, which should grant it -// with all permissions. 
This is used for local mode sessions -func (me *Session) IsUnrestricted() bool { - return me.Local -} - -func (me *Session) DeepCopy() *Session { - copySession := *me - - if me.Props != nil { - copySession.Props = CopyStringMap(me.Props) - } - - if me.TeamMembers != nil { - copySession.TeamMembers = make([]*TeamMember, len(me.TeamMembers)) - for index, tm := range me.TeamMembers { - copySession.TeamMembers[index] = new(TeamMember) - *copySession.TeamMembers[index] = *tm - } - } - - return ©Session -} - -func (me *Session) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func SessionFromJson(data io.Reader) *Session { - var me *Session - json.NewDecoder(data).Decode(&me) - return me -} - -func (me *Session) PreSave() { - if me.Id == "" { - me.Id = NewId() - } - - if me.Token == "" { - me.Token = NewId() - } - - me.CreateAt = GetMillis() - me.LastActivityAt = me.CreateAt - - if me.Props == nil { - me.Props = make(map[string]string) - } -} - -func (me *Session) Sanitize() { - me.Token = "" -} - -func (me *Session) IsExpired() bool { - - if me.ExpiresAt <= 0 { - return false - } - - if GetMillis() > me.ExpiresAt { - return true - } - - return false -} - -func (me *Session) SetExpireInDays(days int) { - if me.CreateAt == 0 { - me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) - } else { - me.ExpiresAt = me.CreateAt + (1000 * 60 * 60 * 24 * int64(days)) - } -} - -func (me *Session) AddProp(key string, value string) { - - if me.Props == nil { - me.Props = make(map[string]string) - } - - me.Props[key] = value -} - -func (me *Session) GetTeamByTeamId(teamId string) *TeamMember { - for _, team := range me.TeamMembers { - if team.TeamId == teamId { - return team - } - } - - return nil -} - -func (me *Session) IsMobileApp() bool { - return len(me.DeviceId) > 0 || me.IsMobile() -} - -func (me *Session) IsMobile() bool { - val, ok := me.Props[USER_AUTH_SERVICE_IS_MOBILE] - if !ok { - return false - } - isMobile, err := strconv.ParseBool(val) - if err != nil { - mlog.Error("Error parsing boolean property from Session", mlog.Err(err)) - return false - } - return isMobile -} - -func (me *Session) IsSaml() bool { - val, ok := me.Props[USER_AUTH_SERVICE_IS_SAML] - if !ok { - return false - } - isSaml, err := strconv.ParseBool(val) - if err != nil { - mlog.Error("Error parsing boolean property from Session", mlog.Err(err)) - return false - } - return isSaml -} - -func (me *Session) IsOAuthUser() bool { - val, ok := me.Props[USER_AUTH_SERVICE_IS_OAUTH] - if !ok { - return false - } - isOAuthUser, err := strconv.ParseBool(val) - if err != nil { - mlog.Error("Error parsing boolean property from Session", mlog.Err(err)) - return false - } - return isOAuthUser -} - -func (me *Session) IsSSOLogin() bool { - return me.IsOAuthUser() || me.IsSaml() -} - -func (me *Session) GetUserRoles() []string { - return strings.Fields(me.Roles) -} - -func (me *Session) GenerateCSRF() string { - token := NewId() - me.AddProp("csrf", token) - return token -} - -func (me *Session) GetCSRF() string { - if me.Props == nil { - return "" - } - - return me.Props["csrf"] -} - -func SessionsToJson(o []*Session) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func SessionsFromJson(data io.Reader) []*Session { - var o []*Session - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go deleted file mode 100644 
index 741fa1ed..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -const ( - STATUS_OUT_OF_OFFICE = "ooo" - STATUS_OFFLINE = "offline" - STATUS_AWAY = "away" - STATUS_DND = "dnd" - STATUS_ONLINE = "online" - STATUS_CACHE_SIZE = SESSION_CACHE_SIZE - STATUS_CHANNEL_TIMEOUT = 20000 // 20 seconds - STATUS_MIN_UPDATE_TIME = 120000 // 2 minutes -) - -type Status struct { - UserId string `json:"user_id"` - Status string `json:"status"` - Manual bool `json:"manual"` - LastActivityAt int64 `json:"last_activity_at"` - ActiveChannel string `json:"active_channel,omitempty" db:"-"` -} - -func (o *Status) ToJson() string { - oCopy := *o - oCopy.ActiveChannel = "" - b, _ := json.Marshal(oCopy) - return string(b) -} - -func (o *Status) ToClusterJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func StatusFromJson(data io.Reader) *Status { - var o *Status - json.NewDecoder(data).Decode(&o) - return o -} - -func StatusListToJson(u []*Status) string { - uCopy := make([]Status, len(u)) - for i, s := range u { - sCopy := *s - sCopy.ActiveChannel = "" - uCopy[i] = sCopy - } - - b, _ := json.Marshal(uCopy) - return string(b) -} - -func StatusListFromJson(data io.Reader) []*Status { - var statuses []*Status - json.NewDecoder(data).Decode(&statuses) - return statuses -} - -func StatusMapToInterfaceMap(statusMap map[string]*Status) map[string]interface{} { - interfaceMap := map[string]interface{}{} - for _, s := range statusMap { - // Omitted statues mean offline - if s.Status != STATUS_OFFLINE { - interfaceMap[s.UserId] = s.Status - } - } - return interfaceMap -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go b/vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go deleted file mode 100644 index 45a7af38..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type SuggestCommand struct { - Suggestion string `json:"suggestion"` - Description string `json:"description"` -} - -func (o *SuggestCommand) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func SuggestCommandFromJson(data io.Reader) *SuggestCommand { - var o *SuggestCommand - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go deleted file mode 100644 index 0ec4db7d..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
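Status.ToJson above marshals a copy with ActiveChannel cleared, so a broadcast payload never leaks which channel a user is viewing even though the field stays populated in memory. The copy-then-clear idiom in isolation, with the struct shape borrowed from the deleted file:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type status struct {
    	UserId        string `json:"user_id"`
    	Status        string `json:"status"`
    	Manual        bool   `json:"manual"`
    	ActiveChannel string `json:"active_channel,omitempty"`
    }

    // publicJSON marshals a shallow copy with ActiveChannel zeroed, as the
    // deleted Status.ToJson did; omitempty then drops the field entirely.
    func publicJSON(s status) string {
    	c := s
    	c.ActiveChannel = ""
    	b, _ := json.Marshal(c)
    	return string(b)
    }

    func main() {
    	s := status{UserId: "u1", Status: "online", ActiveChannel: "c1"}
    	fmt.Println(publicJSON(s))   // {"user_id":"u1","status":"online","manual":false}
    	fmt.Println(s.ActiveChannel) // c1: the in-memory value is untouched
    }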
- -package model - -import ( - "encoding/json" - "io" -) - -type SwitchRequest struct { - CurrentService string `json:"current_service"` - NewService string `json:"new_service"` - Email string `json:"email"` - Password string `json:"password"` - NewPassword string `json:"new_password"` - MfaCode string `json:"mfa_code"` - LdapLoginId string `json:"ldap_id"` -} - -func (o *SwitchRequest) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func SwitchRequestFromJson(data io.Reader) *SwitchRequest { - var o *SwitchRequest - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *SwitchRequest) EmailToOAuth() bool { - return o.CurrentService == USER_AUTH_SERVICE_EMAIL && - (o.NewService == USER_AUTH_SERVICE_SAML || - o.NewService == USER_AUTH_SERVICE_GITLAB || - o.NewService == SERVICE_GOOGLE || - o.NewService == SERVICE_OFFICE365) -} - -func (o *SwitchRequest) OAuthToEmail() bool { - return (o.CurrentService == USER_AUTH_SERVICE_SAML || - o.CurrentService == USER_AUTH_SERVICE_GITLAB || - o.CurrentService == SERVICE_GOOGLE || - o.CurrentService == SERVICE_OFFICE365) && o.NewService == USER_AUTH_SERVICE_EMAIL -} - -func (o *SwitchRequest) EmailToLdap() bool { - return o.CurrentService == USER_AUTH_SERVICE_EMAIL && o.NewService == USER_AUTH_SERVICE_LDAP -} - -func (o *SwitchRequest) LdapToEmail() bool { - return o.CurrentService == USER_AUTH_SERVICE_LDAP && o.NewService == USER_AUTH_SERVICE_EMAIL -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go deleted file mode 100644 index 1ad0775c..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
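The four Switch* predicates deleted above are plain comparisons of CurrentService and NewService against auth-service constants defined elsewhere in the model package. A tiny sketch of the same shape; the constant values below are stand-ins, not necessarily the real USER_AUTH_SERVICE_* strings:

    package main

    import "fmt"

    // Stand-ins for the model package's auth-service constants.
    const (
    	serviceEmail = "email"
    	serviceLDAP  = "ldap"
    )

    type switchRequest struct {
    	CurrentService string
    	NewService     string
    }

    // emailToLdap mirrors the deleted SwitchRequest.EmailToLdap predicate.
    func (r switchRequest) emailToLdap() bool {
    	return r.CurrentService == serviceEmail && r.NewService == serviceLDAP
    }

    func main() {
    	r := switchRequest{CurrentService: serviceEmail, NewService: serviceLDAP}
    	fmt.Println(r.emailToLdap()) // true
    }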
- -package model - -import ( - "encoding/json" - "io" - "math/big" -) - -const ( - SYSTEM_DIAGNOSTIC_ID = "DiagnosticId" - SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" - SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" - SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" - SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" - SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" - SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret" - SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate" - SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp" - SYSTEM_CLUSTER_ENCRYPTION_KEY = "ClusterEncryptionKey" - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200 = "warn_metric_number_of_active_users_200" - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_400 = "warn_metric_number_of_active_users_400" - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500 = "warn_metric_number_of_active_users_500" -) - -const ( - WARN_METRIC_STATUS_LIMIT_REACHED = "true" - WARN_METRIC_STATUS_RUNONCE = "runonce" - WARN_METRIC_STATUS_ACK = "ack" - WARN_METRIC_STATUS_STORE_PREFIX = "warn_metric_" -) - -type System struct { - Name string `json:"name"` - Value string `json:"value"` -} - -func (o *System) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func SystemFromJson(data io.Reader) *System { - var o *System - json.NewDecoder(data).Decode(&o) - return o -} - -type SystemPostActionCookieSecret struct { - Secret []byte `json:"key,omitempty"` -} - -type SystemAsymmetricSigningKey struct { - ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"` -} - -type SystemECDSAKey struct { - Curve string `json:"curve"` - X *big.Int `json:"x"` - Y *big.Int `json:"y"` - D *big.Int `json:"d,omitempty"` -} - -// ServerBusyState provides serialization for app.Busy. -type ServerBusyState struct { - Busy bool `json:"busy"` - Expires int64 `json:"expires"` - Expires_ts string `json:"expires_ts,omitempty"` -} - -func (sbs *ServerBusyState) ToJson() string { - b, _ := json.Marshal(sbs) - return string(b) -} - -func ServerBusyStateFromJson(r io.Reader) *ServerBusyState { - var sbs *ServerBusyState - json.NewDecoder(r).Decode(&sbs) - return sbs -} - -var WarnMetricsTable = map[string]WarnMetric{ - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200: { - Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200, - Limit: 200, - IsBotOnly: true, - IsRunOnce: true, - }, - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_400: { - Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_400, - Limit: 400, - IsBotOnly: true, - IsRunOnce: true, - }, - SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500: { - Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500, - Limit: 500, - IsBotOnly: false, - IsRunOnce: false, - }, -} - -type WarnMetric struct { - Id string - Limit int64 - IsBotOnly bool - IsRunOnce bool -} - -type WarnMetricDisplayTexts struct { - BotTitle string - BotMessageBody string - BotMailToBody string - EmailBody string -} -type WarnMetricStatus struct { - Id string `json:"id"` - Limit int64 `json:"limit"` - Acked bool `json:"acked"` - StoreStatus string `json:"store_status,omitempty"` -} - -func (wms *WarnMetricStatus) ToJson() string { - b, _ := json.Marshal(wms) - return string(b) -} - -func WarnMetricStatusFromJson(data io.Reader) *WarnMetricStatus { - var o WarnMetricStatus - if err := json.NewDecoder(data).Decode(&o); err != nil { - return nil - } else { - return &o - } -} - -func MapWarnMetricStatusToJson(o map[string]*WarnMetricStatus) string { - b, _ := json.Marshal(o) - return string(b) -} - -type SendWarnMetricAck struct { - ForceAck bool `json:"forceAck"` -} - -func 
(swma *SendWarnMetricAck) ToJson() string { - b, _ := json.Marshal(swma) - return string(b) -} - -func SendWarnMetricAckFromJson(r io.Reader) *SendWarnMetricAck { - var swma *SendWarnMetricAck - json.NewDecoder(r).Decode(&swma) - return swma -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go deleted file mode 100644 index b8b1fe30..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type TeamSearch struct { - Term string `json:"term"` - Page *int `json:"page,omitempty"` - PerPage *int `json:"per_page,omitempty"` -} - -func (t *TeamSearch) IsPaginated() bool { - return t.Page != nil && t.PerPage != nil -} - -// ToJson convert a TeamSearch to json string -func (t *TeamSearch) ToJson() string { - b, err := json.Marshal(t) - if err != nil { - return "" - } - - return string(b) -} - -// TeamSearchFromJson decodes the input and returns a TeamSearch -func TeamSearchFromJson(data io.Reader) *TeamSearch { - decoder := json.NewDecoder(data) - var cs TeamSearch - err := decoder.Decode(&cs) - if err == nil { - return &cs - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go deleted file mode 100644 index e2e9d3bf..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type TypingRequest struct { - ChannelId string `json:"channel_id"` - ParentId string `json:"parent_id"` -} - -func (o *TypingRequest) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func TypingRequestFromJson(data io.Reader) *TypingRequest { - var o *TypingRequest - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go deleted file mode 100644 index a692f692..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
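TeamSearch above uses *int for Page and PerPage so that an absent JSON key is distinguishable from an explicit 0, and IsPaginated requires both to be present. The pointer trick demonstrated in isolation (field names taken from the deleted file):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Pointer fields keep "page": 0 distinguishable from "no page at all":
    // absent keys leave the pointers nil, which is exactly what the deleted
    // IsPaginated tested.
    type teamSearch struct {
    	Term    string `json:"term"`
    	Page    *int   `json:"page,omitempty"`
    	PerPage *int   `json:"per_page,omitempty"`
    }

    func (t teamSearch) isPaginated() bool { return t.Page != nil && t.PerPage != nil }

    func main() {
    	var paged, unpaged teamSearch
    	json.Unmarshal([]byte(`{"term":"eng","page":0,"per_page":50}`), &paged)
    	json.Unmarshal([]byte(`{"term":"eng"}`), &unpaged)
    	fmt.Println(paged.isPaginated(), unpaged.isPaginated()) // true false
    }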
- -package model - -import ( - "encoding/json" - "io" -) - -type UserAccessTokenSearch struct { - Term string `json:"term"` -} - -// ToJson convert a UserAccessTokenSearch to json string -func (c *UserAccessTokenSearch) ToJson() string { - b, err := json.Marshal(c) - if err != nil { - return "" - } - - return string(b) -} - -// UserAccessTokenSearchJson decodes the input and returns a UserAccessTokenSearch -func UserAccessTokenSearchFromJson(data io.Reader) *UserAccessTokenSearch { - decoder := json.NewDecoder(data) - var cs UserAccessTokenSearch - err := decoder.Decode(&cs) - if err == nil { - return &cs - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go deleted file mode 100644 index 48a892e2..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type UserAutocompleteInChannel struct { - InChannel []*User `json:"in_channel"` - OutOfChannel []*User `json:"out_of_channel"` -} - -type UserAutocompleteInTeam struct { - InTeam []*User `json:"in_team"` -} - -type UserAutocomplete struct { - Users []*User `json:"users"` - OutOfChannel []*User `json:"out_of_channel,omitempty"` -} - -func (o *UserAutocomplete) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func UserAutocompleteFromJson(data io.Reader) *UserAutocomplete { - decoder := json.NewDecoder(data) - autocomplete := new(UserAutocomplete) - err := decoder.Decode(&autocomplete) - if err == nil { - return autocomplete - } else { - return nil - } -} - -func (o *UserAutocompleteInChannel) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func UserAutocompleteInChannelFromJson(data io.Reader) *UserAutocompleteInChannel { - var o *UserAutocompleteInChannel - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *UserAutocompleteInTeam) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func UserAutocompleteInTeamFromJson(data io.Reader) *UserAutocompleteInTeam { - var o *UserAutocompleteInTeam - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go b/vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go deleted file mode 100644 index 3e18296e..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type UsersStats struct { - TotalUsersCount int64 `json:"total_users_count"` -} - -func (o *UsersStats) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func UsersStatsFromJson(data io.Reader) *UsersStats { - var o *UsersStats - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go deleted file mode 100644 index 281b50cf..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
-// See LICENSE.txt for license information. - -package model - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -const ( - WEBSOCKET_EVENT_TYPING = "typing" - WEBSOCKET_EVENT_POSTED = "posted" - WEBSOCKET_EVENT_POST_EDITED = "post_edited" - WEBSOCKET_EVENT_POST_DELETED = "post_deleted" - WEBSOCKET_EVENT_POST_UNREAD = "post_unread" - WEBSOCKET_EVENT_CHANNEL_CONVERTED = "channel_converted" - WEBSOCKET_EVENT_CHANNEL_CREATED = "channel_created" - WEBSOCKET_EVENT_CHANNEL_DELETED = "channel_deleted" - WEBSOCKET_EVENT_CHANNEL_RESTORED = "channel_restored" - WEBSOCKET_EVENT_CHANNEL_UPDATED = "channel_updated" - WEBSOCKET_EVENT_CHANNEL_MEMBER_UPDATED = "channel_member_updated" - WEBSOCKET_EVENT_CHANNEL_SCHEME_UPDATED = "channel_scheme_updated" - WEBSOCKET_EVENT_DIRECT_ADDED = "direct_added" - WEBSOCKET_EVENT_GROUP_ADDED = "group_added" - WEBSOCKET_EVENT_NEW_USER = "new_user" - WEBSOCKET_EVENT_ADDED_TO_TEAM = "added_to_team" - WEBSOCKET_EVENT_LEAVE_TEAM = "leave_team" - WEBSOCKET_EVENT_UPDATE_TEAM = "update_team" - WEBSOCKET_EVENT_DELETE_TEAM = "delete_team" - WEBSOCKET_EVENT_RESTORE_TEAM = "restore_team" - WEBSOCKET_EVENT_UPDATE_TEAM_SCHEME = "update_team_scheme" - WEBSOCKET_EVENT_USER_ADDED = "user_added" - WEBSOCKET_EVENT_USER_UPDATED = "user_updated" - WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated" - WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated" - WEBSOCKET_EVENT_USER_REMOVED = "user_removed" - WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed" - WEBSOCKET_EVENT_PREFERENCES_CHANGED = "preferences_changed" - WEBSOCKET_EVENT_PREFERENCES_DELETED = "preferences_deleted" - WEBSOCKET_EVENT_EPHEMERAL_MESSAGE = "ephemeral_message" - WEBSOCKET_EVENT_STATUS_CHANGE = "status_change" - WEBSOCKET_EVENT_HELLO = "hello" - WEBSOCKET_AUTHENTICATION_CHALLENGE = "authentication_challenge" - WEBSOCKET_EVENT_REACTION_ADDED = "reaction_added" - WEBSOCKET_EVENT_REACTION_REMOVED = "reaction_removed" - WEBSOCKET_EVENT_RESPONSE = "response" - WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added" - WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed" - WEBSOCKET_EVENT_PLUGIN_STATUSES_CHANGED = "plugin_statuses_changed" - WEBSOCKET_EVENT_PLUGIN_ENABLED = "plugin_enabled" - WEBSOCKET_EVENT_PLUGIN_DISABLED = "plugin_disabled" - WEBSOCKET_EVENT_ROLE_UPDATED = "role_updated" - WEBSOCKET_EVENT_LICENSE_CHANGED = "license_changed" - WEBSOCKET_EVENT_CONFIG_CHANGED = "config_changed" - WEBSOCKET_EVENT_OPEN_DIALOG = "open_dialog" - WEBSOCKET_EVENT_GUESTS_DEACTIVATED = "guests_deactivated" - WEBSOCKET_EVENT_RECEIVED_GROUP = "received_group" - WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_TEAM = "received_group_associated_to_team" - WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_TEAM = "received_group_not_associated_to_team" - WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_CHANNEL = "received_group_associated_to_channel" - WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_CHANNEL = "received_group_not_associated_to_channel" - WEBSOCKET_EVENT_SIDEBAR_CATEGORY_CREATED = "sidebar_category_created" - WEBSOCKET_EVENT_SIDEBAR_CATEGORY_UPDATED = "sidebar_category_updated" - WEBSOCKET_EVENT_SIDEBAR_CATEGORY_DELETED = "sidebar_category_deleted" - WEBSOCKET_EVENT_SIDEBAR_CATEGORY_ORDER_UPDATED = "sidebar_category_order_updated" - WEBSOCKET_WARN_METRIC_STATUS_RECEIVED = "warn_metric_status_received" - WEBSOCKET_WARN_METRIC_STATUS_REMOVED = "warn_metric_status_removed" -) - -type WebSocketMessage interface { - ToJson() string - IsValid() bool - EventType() string -} - -type WebsocketBroadcast struct { - OmitUsers 
map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here - UserId string `json:"user_id"` // broadcast only occurs for this user - ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel - TeamId string `json:"team_id"` // broadcast only occurs for users in this team - ContainsSanitizedData bool `json:"-"` - ContainsSensitiveData bool `json:"-"` -} - -type precomputedWebSocketEventJSON struct { - Event json.RawMessage - Data json.RawMessage - Broadcast json.RawMessage -} - -// webSocketEventJSON mirrors WebSocketEvent to make some of its unexported fields serializable -type webSocketEventJSON struct { - Event string `json:"event"` - Data map[string]interface{} `json:"data"` - Broadcast *WebsocketBroadcast `json:"broadcast"` - Sequence int64 `json:"seq"` -} - -// **NOTE**: Direct access to WebSocketEvent fields is deprecated. They will be -// made unexported in next major version release. Provided getter functions should be used instead. -type WebSocketEvent struct { - Event string // Deprecated: use EventType() - Data map[string]interface{} // Deprecated: use GetData() - Broadcast *WebsocketBroadcast // Deprecated: use GetBroadcast() - Sequence int64 // Deprecated: use GetSequence() - precomputedJSON *precomputedWebSocketEventJSON -} - -// PrecomputeJSON precomputes and stores the serialized JSON for all fields other than Sequence. -// This makes ToJson much more efficient when sending the same event to multiple connections. -func (ev *WebSocketEvent) PrecomputeJSON() *WebSocketEvent { - copy := ev.Copy() - event, _ := json.Marshal(copy.Event) - data, _ := json.Marshal(copy.Data) - broadcast, _ := json.Marshal(copy.Broadcast) - copy.precomputedJSON = &precomputedWebSocketEventJSON{ - Event: json.RawMessage(event), - Data: json.RawMessage(data), - Broadcast: json.RawMessage(broadcast), - } - return copy -} - -func (ev *WebSocketEvent) Add(key string, value interface{}) { - ev.Data[key] = value -} - -func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent { - return &WebSocketEvent{Event: event, Data: make(map[string]interface{}), - Broadcast: &WebsocketBroadcast{TeamId: teamId, ChannelId: channelId, UserId: userId, OmitUsers: omitUsers}} -} - -func (ev *WebSocketEvent) Copy() *WebSocketEvent { - copy := &WebSocketEvent{ - Event: ev.Event, - Data: ev.Data, - Broadcast: ev.Broadcast, - Sequence: ev.Sequence, - precomputedJSON: ev.precomputedJSON, - } - return copy -} - -func (ev *WebSocketEvent) GetData() map[string]interface{} { - return ev.Data -} - -func (ev *WebSocketEvent) GetBroadcast() *WebsocketBroadcast { - return ev.Broadcast -} - -func (ev *WebSocketEvent) GetSequence() int64 { - return ev.Sequence -} - -func (ev *WebSocketEvent) SetEvent(event string) *WebSocketEvent { - copy := ev.Copy() - copy.Event = event - return copy -} - -func (ev *WebSocketEvent) SetData(data map[string]interface{}) *WebSocketEvent { - copy := ev.Copy() - copy.Data = data - return copy -} - -func (ev *WebSocketEvent) SetBroadcast(broadcast *WebsocketBroadcast) *WebSocketEvent { - copy := ev.Copy() - copy.Broadcast = broadcast - return copy -} - -func (ev *WebSocketEvent) SetSequence(seq int64) *WebSocketEvent { - copy := ev.Copy() - copy.Sequence = seq - return copy -} - -func (ev *WebSocketEvent) IsValid() bool { - return ev.Event != "" -} - -func (ev *WebSocketEvent) EventType() string { - return ev.Event -} - -func (ev *WebSocketEvent) ToJson() string { - if ev.precomputedJSON != nil { - return 
fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, ev.precomputedJSON.Event, ev.precomputedJSON.Data, ev.precomputedJSON.Broadcast, ev.Sequence) - } - b, _ := json.Marshal(webSocketEventJSON{ - ev.Event, - ev.Data, - ev.Broadcast, - ev.Sequence, - }) - return string(b) -} - -func WebSocketEventFromJson(data io.Reader) *WebSocketEvent { - var ev WebSocketEvent - var o webSocketEventJSON - if err := json.NewDecoder(data).Decode(&o); err != nil { - return nil - } - ev.Event = o.Event - if u, ok := o.Data["user"]; ok { - // We need to convert to and from JSON again - // because the user is in the form of a map[string]interface{}. - buf, err := json.Marshal(u) - if err != nil { - return nil - } - o.Data["user"] = UserFromJson(bytes.NewReader(buf)) - } - ev.Data = o.Data - ev.Broadcast = o.Broadcast - ev.Sequence = o.Sequence - return &ev -} - -// WebSocketResponse represents a response received through the WebSocket -// for a request made to the server. This is available through the ResponseChannel -// channel in WebSocketClient. -type WebSocketResponse struct { - Status string `json:"status"` // The status of the response. For example: OK, FAIL. - SeqReply int64 `json:"seq_reply,omitempty"` // A counter which is incremented for every response sent. - Data map[string]interface{} `json:"data,omitempty"` // The data contained in the response. - Error *AppError `json:"error,omitempty"` // A field that is set if any error has occurred. -} - -func (m *WebSocketResponse) Add(key string, value interface{}) { - m.Data[key] = value -} - -func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse { - return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data} -} - -func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse { - return &WebSocketResponse{Status: STATUS_FAIL, SeqReply: seqReply, Error: err} -} - -func (m *WebSocketResponse) IsValid() bool { - return m.Status != "" -} - -func (m *WebSocketResponse) EventType() string { - return WEBSOCKET_EVENT_RESPONSE -} - -func (m *WebSocketResponse) ToJson() string { - b, _ := json.Marshal(m) - return string(b) -} - -func WebSocketResponseFromJson(data io.Reader) *WebSocketResponse { - var o *WebSocketResponse - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go deleted file mode 100644 index 6628a5c9..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package model - -import ( - "encoding/json" - "io" - - goi18n "github.com/mattermost/go-i18n/i18n" -) - -// WebSocketRequest represents a request made to the server through a websocket. -type WebSocketRequest struct { - // Client-provided fields - Seq int64 `json:"seq"` // A counter which is incremented for every request made. - Action string `json:"action"` // The action to perform for a request. For example: get_statuses, user_typing. - Data map[string]interface{} `json:"data"` // The metadata for an action. 
- - // Server-provided fields - Session Session `json:"-"` - T goi18n.TranslateFunc `json:"-"` - Locale string `json:"-"` -} - -func (o *WebSocketRequest) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func WebSocketRequestFromJson(data io.Reader) *WebSocketRequest { - var o *WebSocketRequest - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go deleted file mode 100644 index 62ad3ad8..00000000 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package fileutils - -import ( - "os" - "path/filepath" -) - -var ( - commonBaseSearchPaths = []string{ - ".", - "..", - "../..", - "../../..", - } -) - -func findPath(path string, baseSearchPaths []string, workingDirFirst bool, filter func(os.FileInfo) bool) string { - if filepath.IsAbs(path) { - if _, err := os.Stat(path); err == nil { - return path - } - - return "" - } - - searchPaths := []string{} - if workingDirFirst { - searchPaths = append(searchPaths, baseSearchPaths...) - } - - // Attempt to search relative to the location of the running binary either before - // or after searching relative to the working directory, depending on `workingDirFirst`. - var binaryDir string - if exe, err := os.Executable(); err == nil { - if exe, err = filepath.EvalSymlinks(exe); err == nil { - if exe, err = filepath.Abs(exe); err == nil { - binaryDir = filepath.Dir(exe) - } - } - } - if binaryDir != "" { - for _, baseSearchPath := range baseSearchPaths { - searchPaths = append( - searchPaths, - filepath.Join(binaryDir, baseSearchPath), - ) - } - } - - if !workingDirFirst { - searchPaths = append(searchPaths, baseSearchPaths...) - } - - for _, parent := range searchPaths { - found, err := filepath.Abs(filepath.Join(parent, path)) - if err != nil { - continue - } else if fileInfo, err := os.Stat(found); err == nil { - if filter != nil { - if filter(fileInfo) { - return found - } - } else { - return found - } - } - } - - return "" -} - -func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bool) string { - return findPath(path, baseSearchPaths, true, filter) -} - -// FindFile looks for the given file in nearby ancestors relative to the current working -// directory as well as the directory of the executable. -func FindFile(path string) string { - return FindPath(path, commonBaseSearchPaths, func(fileInfo os.FileInfo) bool { - return !fileInfo.IsDir() - }) -} - -// fileutils.FindDir looks for the given directory in nearby ancestors relative to the current working -// directory as well as the directory of the executable, falling back to `./` if not found. -func FindDir(dir string) (string, bool) { - found := FindPath(dir, commonBaseSearchPaths, func(fileInfo os.FileInfo) bool { - return fileInfo.IsDir() - }) - if found == "" { - return "./", false - } - - return found, true -} - -// FindDirRelBinary looks for the given directory in nearby ancestors relative to the -// directory of the executable, then relative to the working directory, falling back to `./` if not found. 
-func FindDirRelBinary(dir string) (string, bool) { - found := findPath(dir, commonBaseSearchPaths, false, func(fileInfo os.FileInfo) bool { - return fileInfo.IsDir() - }) - if found == "" { - return "./", false - } - return found, true -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/v6/LICENSE.txt similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt rename to vendor/github.com/mattermost/mattermost-server/v6/LICENSE.txt index b40b5e58..8ced25a1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v6/LICENSE.txt @@ -11,7 +11,7 @@ You may be licensed to use source code to create compiled versions not produced 1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or 2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com -You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, model/, +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, i18n/, model/, plugin/ and all subdirectories thereof) under the Apache License v2.0. We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not @@ -22,7 +22,7 @@ a “modified version” or “work based on” Mattermost as these terms are de MATTERMOST TRADEMARK GUIDELINES Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark -Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +Standards of Use at https://mattermost.com/trademark-standards-of-use/. For trademark approval or any questions you have about using these trademarks, please email trademark@mattermost.com ------------------------------------------------------------------------------------------------------------------------------ diff --git a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt b/vendor/github.com/mattermost/mattermost-server/v6/NOTICE.txt similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt rename to vendor/github.com/mattermost/mattermost-server/v6/NOTICE.txt index 0316f702..3f186ef7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v6/NOTICE.txt @@ -864,7 +864,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -1387,7 +1387,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This product contains 'durafmt' by Wesley Hill. -:clock8: Better time duration formatting in Go! +:clock8: Better time duration formatting in Go! 
* HOMEPAGE: * https://github.com/hako/durafmt @@ -3657,7 +3657,7 @@ lumberjack is a log rolling package for Go The MIT License (MIT) -Copyright (c) 2014 Nate Finch +Copyright (c) 2014 Nate Finch Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -4132,3 +4132,713 @@ A caching, resizing image proxy written in Go of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + +--- + +## oov/psd + +This product contains 'psd' by oov. + +A PSD/PSB file reader for go + +* HOMEPAGE: + * https://github.com/oov/psd + +* LICENSE: MIT + +MIT License + +Copyright (c) 2016 oov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## gopherjs + +This product contains 'gopherjs' by Richard Musiol. + +A Go code to javascript code compiler. + +* HOMEPAGE: + * https://github.com/gopherjs/gopherjs + +* LICENSE: + +Copyright (c) 2013 Richard Musiol. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +## AWS SDK for Go + +This product contains 'aws-sdk' by Amazon. + +AWS-SDK support for the Go language. 
+ +* HOMEPAGE: + * https://github.com/aws/aws-sdk-go + +* LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- + +## semver + +This product contains 'semver' by Masterminds. + +The semver package provides the ability to work with Semantic Versions in Go. + +* HOMEPAGE: + * https://github.com/Masterminds/semver + +* LICENSE: + +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +## Date Constraints + +This product contains 'dateconstraints' by Eli Yukelzon. + +Go library to validate a date against constraints + +* HOMEPAGE: + * https://github.com/reflog/dateconstraints + +* LICENSE: + +MIT License + +Copyright (c) 2020 Eli Yukelzon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## Archiver + +This product contains 'archiver' by Matthew Holt + +A library to handle different archive files (zip, rar, tar.gz...) 
+ +* HOMEPAGE: + * https://github.com/mholt/archiver + +* LICENSE: + +MIT License + +Copyright (c) 2016 Matthew Holt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## PDF Reader library + +This product contains 'pdf' by the Go team and modified by Thuc Le + +A library to provide pdf reading support + +* HOMEPAGE: + * https://github.com/ledongthuc/pdf + +* LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +## GoOse + +This product contains 'GoOse' by Antonio Linari + +A library to provide html text extraction support + +* HOMEPAGE: + * https://github.com/advancedlogic/GoOse + +* LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +--- + +## Docconv + +This product contains 'docconv' by Sajari Pty Ltd + +A library to provide text extraction support for different documents + +* HOMEPAGE: + * https://github.com/sajari/docconv + +* LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Sajari Pty Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +## JWT-Go + +This product contains `jwt-go` by Dave Grijalva + +* HOMEPAGE: + * https://github.com/dgrijalva/jwt-go + +* LICENSE: + +The MIT License (MIT) + +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of +the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
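The model package diffs that follow show the main API change in the v5 to v6 bump: every per-type ToJson/FromJson helper is deleted, and callers are expected to use encoding/json directly. Below is a minimal Go sketch of that migration, assuming a caller that previously round-tripped model.AccessData through the removed helpers; the trimmed-down AccessData struct and the main scaffolding here are illustrative stand-ins, not the vendored types.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// AccessData mirrors a subset of the vendored model.AccessData fields;
// it is a stand-in for this sketch, not the full model type.
type AccessData struct {
	ClientId string `json:"client_id"`
	UserId   string `json:"user_id"`
}

func main() {
	ad := AccessData{ClientId: "clientid", UserId: "userid"}

	// v5 style (removed in the diffs below): s := ad.ToJson()
	// v6 style: call encoding/json directly and handle the error yourself.
	b, err := json.Marshal(&ad)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	// v5 style (removed): ad2 := AccessDataFromJson(reader)
	// v6 style: decode with a json.Decoder and check the error.
	var ad2 AccessData
	if err := json.NewDecoder(bytes.NewReader(b)).Decode(&ad2); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%+v\n", ad2)
}

One consequence worth noting: the deleted helpers discarded the marshal error (b, _ := json.Marshal(...)), so the v6 pattern surfaces failures the old API silently swallowed.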
+ diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/access.go b/vendor/github.com/mattermost/mattermost-server/v6/model/access.go similarity index 68% rename from vendor/github.com/mattermost/mattermost-server/v5/model/access.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/access.go index bbac3601..f17c8fbe 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/access.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/access.go @@ -4,15 +4,13 @@ package model import ( - "encoding/json" - "io" "net/http" ) const ( - ACCESS_TOKEN_GRANT_TYPE = "authorization_code" - ACCESS_TOKEN_TYPE = "bearer" - REFRESH_TOKEN_GRANT_TYPE = "refresh_token" + AccessTokenGrantType = "authorization_code" + AccessTokenType = "bearer" + RefreshTokenGrantType = "refresh_token" ) type AccessData struct { @@ -31,17 +29,17 @@ type AccessResponse struct { ExpiresIn int32 `json:"expires_in"` Scope string `json:"scope"` RefreshToken string `json:"refresh_token"` + IdToken string `json:"id_token"` } // IsValid validates the AccessData and returns an error if it isn't configured // correctly. func (ad *AccessData) IsValid() *AppError { - - if len(ad.ClientId) == 0 || len(ad.ClientId) > 26 { + if ad.ClientId == "" || len(ad.ClientId) > 26 { return NewAppError("AccessData.IsValid", "model.access.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } - if len(ad.UserId) == 0 || len(ad.UserId) > 26 { + if ad.UserId == "" || len(ad.UserId) > 26 { return NewAppError("AccessData.IsValid", "model.access.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } @@ -53,7 +51,7 @@ func (ad *AccessData) IsValid() *AppError { return NewAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "", http.StatusBadRequest) } - if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { + if ad.RedirectUri == "" || len(ad.RedirectUri) > 256 || !IsValidHTTPURL(ad.RedirectUri) { return NewAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "", http.StatusBadRequest) } @@ -72,25 +70,3 @@ func (ad *AccessData) IsExpired() bool { return false } - -func (ad *AccessData) ToJson() string { - b, _ := json.Marshal(ad) - return string(b) -} - -func AccessDataFromJson(data io.Reader) *AccessData { - var ad *AccessData - json.NewDecoder(data).Decode(&ad) - return ad -} - -func (ar *AccessResponse) ToJson() string { - b, _ := json.Marshal(ar) - return string(b) -} - -func AccessResponseFromJson(data io.Reader) *AccessResponse { - var ar *AccessResponse - json.NewDecoder(data).Decode(&ar) - return ar -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/analytics_row.go b/vendor/github.com/mattermost/mattermost-server/v6/model/analytics_row.go new file mode 100644 index 00000000..72ba3a09 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/analytics_row.go @@ -0,0 +1,11 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +type AnalyticsRow struct { + Name string `json:"name"` + Value float64 `json:"value"` +} + +type AnalyticsRows []*AnalyticsRow diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/audit.go b/vendor/github.com/mattermost/mattermost-server/v6/model/audit.go similarity index 64% rename from vendor/github.com/mattermost/mattermost-server/v5/model/audit.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/audit.go index dd1e0602..3e8345c7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/audit.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/audit.go @@ -3,11 +3,6 @@ package model -import ( - "encoding/json" - "io" -) - type Audit struct { Id string `json:"id"` CreateAt int64 `json:"create_at"` @@ -17,14 +12,3 @@ type Audit struct { IpAddress string `json:"ip_address"` SessionId string `json:"session_id"` } - -func (o *Audit) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func AuditFromJson(data io.Reader) *Audit { - var o *Audit - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go b/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go similarity index 84% rename from vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go index 50af2880..1bcb2363 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go @@ -3,7 +3,11 @@ package model -import "github.com/francoispqt/gojay" +import ( + "strings" + + "github.com/francoispqt/gojay" +) // AuditModelTypeConv converts key model types to something better suited for audit output. 
func AuditModelTypeConv(val interface{}) (newVal interface{}, converted bool) { @@ -13,42 +17,88 @@ func AuditModelTypeConv(val interface{}) (newVal interface{}, converted bool) { switch v := val.(type) { case *Channel: return newAuditChannel(v), true + case Channel: + return newAuditChannel(&v), true case *Team: return newAuditTeam(v), true + case Team: + return newAuditTeam(&v), true case *User: return newAuditUser(v), true + case User: + return newAuditUser(&v), true + case *UserPatch: + return newAuditUserPatch(v), true + case UserPatch: + return newAuditUserPatch(&v), true case *Command: return newAuditCommand(v), true + case Command: + return newAuditCommand(&v), true case *CommandArgs: return newAuditCommandArgs(v), true + case CommandArgs: + return newAuditCommandArgs(&v), true case *Bot: return newAuditBot(v), true + case Bot: + return newAuditBot(&v), true case *ChannelModerationPatch: return newAuditChannelModerationPatch(v), true + case ChannelModerationPatch: + return newAuditChannelModerationPatch(&v), true case *Emoji: return newAuditEmoji(v), true + case Emoji: + return newAuditEmoji(&v), true case *FileInfo: return newAuditFileInfo(v), true + case FileInfo: + return newAuditFileInfo(&v), true case *Group: return newAuditGroup(v), true + case Group: + return newAuditGroup(&v), true case *Job: return newAuditJob(v), true + case Job: + return newAuditJob(&v), true case *OAuthApp: return newAuditOAuthApp(v), true + case OAuthApp: + return newAuditOAuthApp(&v), true case *Post: return newAuditPost(v), true + case Post: + return newAuditPost(&v), true case *Role: return newAuditRole(v), true + case Role: + return newAuditRole(&v), true case *Scheme: return newAuditScheme(v), true + case Scheme: + return newAuditScheme(&v), true case *SchemeRoles: return newAuditSchemeRoles(v), true + case SchemeRoles: + return newAuditSchemeRoles(&v), true case *Session: return newAuditSession(v), true + case Session: + return newAuditSession(&v), true case *IncomingWebhook: return newAuditIncomingWebhook(v), true + case IncomingWebhook: + return newAuditIncomingWebhook(&v), true case *OutgoingWebhook: return newAuditOutgoingWebhook(v), true + case OutgoingWebhook: + return newAuditOutgoingWebhook(&v), true + case *RemoteCluster: + return newRemoteCluster(v), true + case RemoteCluster: + return newRemoteCluster(&v), true } return val, false } @@ -56,7 +106,7 @@ func AuditModelTypeConv(val interface{}) (newVal interface{}, converted bool) { type auditChannel struct { ID string Name string - Type string + Type ChannelType } // newAuditChannel creates a simplified representation of Channel for output to audit log. @@ -73,7 +123,7 @@ func newAuditChannel(c *Channel) auditChannel { func (c auditChannel) MarshalJSONObject(enc *gojay.Encoder) { enc.StringKey("id", c.ID) enc.StringKey("name", c.Name) - enc.StringKey("type", c.Type) + enc.StringKey("type", string(c.Type)) } func (c auditChannel) IsNil() bool { @@ -124,6 +174,21 @@ func newAuditUser(u *User) auditUser { return user } +type auditUserPatch struct { + Name string +} + +// newAuditUserPatch creates a simplified representation of UserPatch for output to audit log. 
+func newAuditUserPatch(up *UserPatch) auditUserPatch { + var userPatch auditUserPatch + if up != nil { + if up.Username != nil { + userPatch.Name = *up.Username + } + } + return userPatch +} + func (u auditUser) MarshalJSONObject(enc *gojay.Encoder) { enc.StringKey("id", u.ID) enc.StringKey("name", u.Name) @@ -205,7 +270,10 @@ func newAuditCommandArgs(ca *CommandArgs) auditCommandArgs { cmdargs.ChannelID = ca.ChannelId cmdargs.TeamID = ca.TeamId cmdargs.TriggerID = ca.TriggerId - cmdargs.Command = ca.Command + cmdFields := strings.Fields(ca.Command) + if len(cmdFields) > 0 { + cmdargs.Command = cmdFields[0] + } } return cmdargs } @@ -665,3 +733,45 @@ func (h auditOutgoingWebhook) MarshalJSONObject(enc *gojay.Encoder) { func (h auditOutgoingWebhook) IsNil() bool { return false } + +type auditRemoteCluster struct { + RemoteId string + RemoteTeamId string + Name string + DisplayName string + SiteURL string + CreateAt int64 + LastPingAt int64 + CreatorId string +} + +// newRemoteCluster creates a simplified representation of RemoteCluster for output to audit log. +func newRemoteCluster(r *RemoteCluster) auditRemoteCluster { + var rc auditRemoteCluster + if r != nil { + rc.RemoteId = r.RemoteId + rc.RemoteTeamId = r.RemoteTeamId + rc.Name = r.Name + rc.DisplayName = r.DisplayName + rc.SiteURL = r.SiteURL + rc.CreateAt = r.CreateAt + rc.LastPingAt = r.LastPingAt + rc.CreatorId = r.CreatorId + } + return rc +} + +func (r auditRemoteCluster) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("remote_id", r.RemoteId) + enc.StringKey("remote_team_id", r.RemoteTeamId) + enc.StringKey("name", r.Name) + enc.StringKey("display_name", r.DisplayName) + enc.StringKey("site_url", r.SiteURL) + enc.Int64Key("create_at", r.CreateAt) + enc.Int64Key("last_ping_at", r.LastPingAt) + enc.StringKey("creator_id", r.CreatorId) +} + +func (r auditRemoteCluster) IsNil() bool { + return false +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/audits.go b/vendor/github.com/mattermost/mattermost-server/v6/model/audits.go new file mode 100644 index 00000000..1c547c89 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/audits.go @@ -0,0 +1,14 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +type Audits []Audit + +func (o Audits) Etag() string { + if len(o) > 0 { + // the first in the list is always the most current + return Etag(o[0].CreateAt) + } + return "" +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go b/vendor/github.com/mattermost/mattermost-server/v6/model/authorize.go similarity index 76% rename from vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/authorize.go index 0191a670..1a767e3c 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/authorize.go @@ -4,16 +4,14 @@ package model import ( - "encoding/json" - "io" "net/http" ) const ( - AUTHCODE_EXPIRE_TIME = 60 * 10 // 10 minutes - AUTHCODE_RESPONSE_TYPE = "code" - IMPLICIT_RESPONSE_TYPE = "token" - DEFAULT_SCOPE = "user" + AuthCodeExpireTime = 60 * 10 // 10 minutes + AuthCodeResponseType = "code" + ImplicitResponseType = "token" + DefaultScope = "user" ) type AuthData struct { @@ -30,7 +28,7 @@ type AuthData struct { type AuthorizeRequest struct { ResponseType string `json:"response_type"` ClientId string `json:"client_id"` - RedirectUri string `json:"redirect_uri"` + RedirectURI string `json:"redirect_uri"` Scope string `json:"scope"` State string `json:"state"` } @@ -47,7 +45,7 @@ func (ad *AuthData) IsValid() *AppError { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(ad.Code) == 0 || len(ad.Code) > 128 { + if ad.Code == "" || len(ad.Code) > 128 { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } @@ -59,7 +57,7 @@ func (ad *AuthData) IsValid() *AppError { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } - if len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { + if len(ad.RedirectUri) > 256 || !IsValidHTTPURL(ad.RedirectUri) { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } @@ -82,11 +80,11 @@ func (ar *AuthorizeRequest) IsValid() *AppError { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } - if len(ar.ResponseType) == 0 { + if ar.ResponseType == "" { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.response_type.app_error", nil, "", http.StatusBadRequest) } - if len(ar.RedirectUri) == 0 || len(ar.RedirectUri) > 256 || !IsValidHttpUrl(ar.RedirectUri) { + if ar.RedirectURI == "" || len(ar.RedirectURI) > 256 || !IsValidHTTPURL(ar.RedirectURI) { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest) } @@ -103,40 +101,18 @@ func (ar *AuthorizeRequest) IsValid() *AppError { func (ad *AuthData) PreSave() { if ad.ExpiresIn == 0 { - ad.ExpiresIn = AUTHCODE_EXPIRE_TIME + ad.ExpiresIn = AuthCodeExpireTime } if ad.CreateAt == 0 { ad.CreateAt = GetMillis() } - if len(ad.Scope) == 0 { - ad.Scope = DEFAULT_SCOPE + if ad.Scope == "" { + ad.Scope = DefaultScope } } -func (ad *AuthData) ToJson() string { - b, _ := json.Marshal(ad) - return string(b) -} - -func AuthDataFromJson(data io.Reader) *AuthData { - var ad *AuthData - json.NewDecoder(data).Decode(&ad) 
- return ad -} - -func (ar *AuthorizeRequest) ToJson() string { - b, _ := json.Marshal(ar) - return string(b) -} - -func AuthorizeRequestFromJson(data io.Reader) *AuthorizeRequest { - var ar *AuthorizeRequest - json.NewDecoder(data).Decode(&ar) - return ar -} - func (ad *AuthData) IsExpired() bool { return GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go b/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/v5/model/bot.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/bot.go index fb46be49..7b581089 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go @@ -4,19 +4,18 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" "strings" "unicode/utf8" ) const ( - BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES - BOT_DESCRIPTION_MAX_RUNES = 1024 - BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId - BOT_WARN_METRIC_BOT_USERNAME = "mattermost-advisor" + BotDisplayNameMaxRunes = UserFirstNameMaxRunes + BotDescriptionMaxRunes = 1024 + BotCreatorIdMaxRunes = KeyValuePluginIdMaxRunes // UserId or PluginId + BotWarnMetricBotUsername = "mattermost-advisor" + BotSystemBotUsername = "system-bot" ) // Bot is a special type of User meant for programmatic interactions. @@ -64,28 +63,33 @@ func (b *Bot) Clone() *Bot { return &copy } -// IsValid validates the bot and returns an error if it isn't configured correctly. -func (b *Bot) IsValid() *AppError { - if !IsValidId(b.UserId) { - return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest) - } - +// IsValidCreate validates bot for Create call. This skips validations of fields that are auto-filled on Create +func (b *Bot) IsValidCreate() *AppError { if !IsValidUsername(b.Username) { return NewAppError("Bot.IsValid", "model.bot.is_valid.username.app_error", b.Trace(), "", http.StatusBadRequest) } - if utf8.RuneCountInString(b.DisplayName) > BOT_DISPLAY_NAME_MAX_RUNES { + if utf8.RuneCountInString(b.DisplayName) > BotDisplayNameMaxRunes { return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest) } - if utf8.RuneCountInString(b.Description) > BOT_DESCRIPTION_MAX_RUNES { + if utf8.RuneCountInString(b.Description) > BotDescriptionMaxRunes { return NewAppError("Bot.IsValid", "model.bot.is_valid.description.app_error", b.Trace(), "", http.StatusBadRequest) } - if len(b.OwnerId) == 0 || utf8.RuneCountInString(b.OwnerId) > BOT_CREATOR_ID_MAX_RUNES { + if b.OwnerId == "" || utf8.RuneCountInString(b.OwnerId) > BotCreatorIdMaxRunes { return NewAppError("Bot.IsValid", "model.bot.is_valid.creator_id.app_error", b.Trace(), "", http.StatusBadRequest) } + return nil +} + +// IsValid validates the bot and returns an error if it isn't configured correctly.
+func (b *Bot) IsValid() *AppError { + if !IsValidId(b.UserId) { + return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest) + } + if b.CreateAt == 0 { return NewAppError("Bot.IsValid", "model.bot.is_valid.create_at.app_error", b.Trace(), "", http.StatusBadRequest) } @@ -93,8 +97,7 @@ func (b *Bot) IsValid() *AppError { if b.UpdateAt == 0 { return NewAppError("Bot.IsValid", "model.bot.is_valid.update_at.app_error", b.Trace(), "", http.StatusBadRequest) } - - return nil + return b.IsValidCreate() } // PreSave should be run before saving a new bot to the database. @@ -114,20 +117,9 @@ func (b *Bot) Etag() string { return Etag(b.UserId, b.UpdateAt) } -// ToJson serializes the bot to json. -func (b *Bot) ToJson() []byte { - data, _ := json.Marshal(b) - return data -} - -// BotFromJson deserializes a bot from json. -func BotFromJson(data io.Reader) *Bot { - var bot *Bot - json.NewDecoder(data).Decode(&bot) - return bot -} - // Patch modifies an existing bot with optional fields from the given patch. +// TODO 6.0: consider returning a boolean to indicate whether or not the patch +// applied any changes. func (b *Bot) Patch(patch *BotPatch) { if patch.Username != nil { b.Username = *patch.Username @@ -142,26 +134,21 @@ func (b *Bot) Patch(patch *BotPatch) { } } -// ToJson serializes the bot patch to json. -func (b *BotPatch) ToJson() []byte { - data, err := json.Marshal(b) - if err != nil { - return nil +// WouldPatch returns whether or not the given patch would be applied or not. +func (b *Bot) WouldPatch(patch *BotPatch) bool { + if patch == nil { + return false } - - return data -} - -// BotPatchFromJson deserializes a bot patch from json. -func BotPatchFromJson(data io.Reader) *BotPatch { - decoder := json.NewDecoder(data) - var botPatch BotPatch - err := decoder.Decode(&botPatch) - if err != nil { - return nil + if patch.Username != nil && *patch.Username != b.Username { + return true } - - return &botPatch + if patch.DisplayName != nil && *patch.DisplayName != b.DisplayName { + return true + } + if patch.Description != nil && *patch.Description != b.Description { + return true + } + return false } // UserFromBot returns a user model describing the bot fields stored in the User store. @@ -171,7 +158,7 @@ func UserFromBot(b *Bot) *User { Username: b.Username, Email: NormalizeEmail(fmt.Sprintf("%s@localhost", b.Username)), FirstName: b.DisplayName, - Roles: SYSTEM_USER_ROLE_ID, + Roles: SystemUserRoleId, } } @@ -181,23 +168,10 @@ func BotFromUser(u *User) *Bot { OwnerId: u.Id, UserId: u.Id, Username: u.Username, - DisplayName: u.GetDisplayName(SHOW_USERNAME), + DisplayName: u.GetDisplayName(ShowUsername), } } -// BotListFromJson deserializes a list of bots from json. -func BotListFromJson(data io.Reader) BotList { - var bots BotList - json.NewDecoder(data).Decode(&bots) - return bots -} - -// ToJson serializes a list of bots to json. -func (l *BotList) ToJson() []byte { - b, _ := json.Marshal(l) - return b -} - // Etag computes the etag for a list of bots. 
 func (l *BotList) Etag() string {
 	id := "0"
@@ -222,7 +196,7 @@ func MakeBotNotFoundError(userId string) *AppError {
 }
 
 func IsBotDMChannel(channel *Channel, botUserID string) bool {
-	if channel.Type != CHANNEL_DIRECT {
+	if channel.Type != ChannelTypeDirect {
 		return false
 	}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/builtin.go b/vendor/github.com/mattermost/mattermost-server/v6/model/builtin.go
similarity index 100%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/builtin.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/builtin.go
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go b/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go
new file mode 100644
index 00000000..b18c32a3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+// ExportDataDir is the name of the directory in which to store additional data
+// included with the export (e.g. file attachments).
+const ExportDataDir = "data"
+
+type BulkExportOpts struct {
+	IncludeAttachments bool
+	CreateArchive      bool
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go b/vendor/github.com/mattermost/mattermost-server/v6/model/bundle_info.go
similarity index 91%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/bundle_info.go
index 429e1c3d..63969de7 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/bundle_info.go
@@ -3,7 +3,9 @@
 
 package model
 
-import "github.com/mattermost/mattermost-server/v5/mlog"
+import (
+	"github.com/mattermost/mattermost-server/v6/shared/mlog"
+)
 
 type BundleInfo struct {
 	Path string
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
similarity index 55%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
index 282271ad..32742582 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
@@ -7,6 +7,7 @@ import (
 	"crypto/sha1"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"io"
 	"net/http"
 	"sort"
@@ -14,43 +15,50 @@ import (
 	"unicode/utf8"
 )
 
+type ChannelType string
+
 const (
-	CHANNEL_OPEN                   = "O"
-	CHANNEL_PRIVATE                = "P"
-	CHANNEL_DIRECT                 = "D"
-	CHANNEL_GROUP                  = "G"
-	CHANNEL_GROUP_MAX_USERS        = 8
-	CHANNEL_GROUP_MIN_USERS        = 3
-	DEFAULT_CHANNEL                = "town-square"
-	CHANNEL_DISPLAY_NAME_MAX_RUNES = 64
-	CHANNEL_NAME_MIN_LENGTH        = 2
-	CHANNEL_NAME_MAX_LENGTH        = 64
-	CHANNEL_HEADER_MAX_RUNES       = 1024
-	CHANNEL_PURPOSE_MAX_RUNES      = 250
-	CHANNEL_CACHE_SIZE             = 25000
-
-	CHANNEL_SORT_BY_USERNAME = "username"
-	CHANNEL_SORT_BY_STATUS   = "status"
+	ChannelTypeOpen    ChannelType = "O"
+	ChannelTypePrivate ChannelType = "P"
+	ChannelTypeDirect  ChannelType = "D"
+	ChannelTypeGroup   ChannelType = "G"
+
+	ChannelGroupMaxUsers       = 8
+	ChannelGroupMinUsers       = 3
+	DefaultChannelName         = "town-square"
+	ChannelDisplayNameMaxRunes = 64
+	ChannelNameMinLength       = 1
+	ChannelNameMaxLength       = 64
+	ChannelHeaderMaxRunes      = 1024
+	ChannelPurposeMaxRunes     = 250
+	ChannelCacheSize           = 25000
+
+	ChannelSortByUsername = "username"
+	ChannelSortByStatus   = "status"
 )
 
 type Channel struct {
-	Id               string                 `json:"id"`
-	CreateAt         int64                  `json:"create_at"`
-	UpdateAt         int64                  `json:"update_at"`
-	DeleteAt         int64                  `json:"delete_at"`
-	TeamId           string                 `json:"team_id"`
-	Type             string                 `json:"type"`
-	DisplayName      string                 `json:"display_name"`
-	Name             string                 `json:"name"`
-	Header           string                 `json:"header"`
-	Purpose          string                 `json:"purpose"`
-	LastPostAt       int64                  `json:"last_post_at"`
-	TotalMsgCount    int64                  `json:"total_msg_count"`
-	ExtraUpdateAt    int64                  `json:"extra_update_at"`
-	CreatorId        string                 `json:"creator_id"`
-	SchemeId         *string                `json:"scheme_id"`
-	Props            map[string]interface{} `json:"props" db:"-"`
-	GroupConstrained *bool                  `json:"group_constrained"`
+	Id                string                 `json:"id"`
+	CreateAt          int64                  `json:"create_at"`
+	UpdateAt          int64                  `json:"update_at"`
+	DeleteAt          int64                  `json:"delete_at"`
+	TeamId            string                 `json:"team_id"`
+	Type              ChannelType            `json:"type"`
+	DisplayName       string                 `json:"display_name"`
+	Name              string                 `json:"name"`
+	Header            string                 `json:"header"`
+	Purpose           string                 `json:"purpose"`
+	LastPostAt        int64                  `json:"last_post_at"`
+	TotalMsgCount     int64                  `json:"total_msg_count"`
+	ExtraUpdateAt     int64                  `json:"extra_update_at"`
+	CreatorId         string                 `json:"creator_id"`
+	SchemeId          *string                `json:"scheme_id"`
+	Props             map[string]interface{} `json:"props"`
+	GroupConstrained  *bool                  `json:"group_constrained"`
+	Shared            *bool                  `json:"shared"`
+	TotalMsgCountRoot int64                  `json:"total_msg_count_root"`
+	PolicyID          *string                `json:"policy_id"`
+	LastRootPostAt    int64                  `json:"last_root_post_at"`
 }
 
 type ChannelWithTeamData struct {
@@ -61,8 +69,8 @@ type ChannelWithTeamData struct {
 }
 
 type ChannelsWithCount struct {
-	Channels   *ChannelListWithTeamData `json:"channels"`
-	TotalCount int64                    `json:"total_count"`
+	Channels   ChannelListWithTeamData `json:"channels"`
+	TotalCount int64                   `json:"total_count"`
 }
 
 type ChannelPatch struct {
@@ -120,83 +128,70 @@ type ChannelModeratedRolesPatch struct {
 
 // PerPage number of results per page, if paginated.
 //
 type ChannelSearchOpts struct {
-	NotAssociatedToGroup    string
-	ExcludeDefaultChannels  bool
-	IncludeDeleted          bool
-	Deleted                 bool
-	ExcludeChannelNames     []string
-	TeamIds                 []string
-	GroupConstrained        bool
-	ExcludeGroupConstrained bool
-	Public                  bool
-	Private                 bool
-	Page                    *int
-	PerPage                 *int
+	NotAssociatedToGroup     string
+	ExcludeDefaultChannels   bool
+	IncludeDeleted           bool
+	Deleted                  bool
+	ExcludeChannelNames      []string
+	TeamIds                  []string
+	GroupConstrained         bool
+	ExcludeGroupConstrained  bool
+	PolicyID                 string
+	ExcludePolicyConstrained bool
+	IncludePolicyID          bool
+	Public                   bool
+	Private                  bool
+	Page                     *int
+	PerPage                  *int
+	LastDeleteAt             int
+	LastUpdateAt             int
 }
 
 type ChannelMemberCountByGroup struct {
-	GroupId                     string `db:"-" json:"group_id"`
-	ChannelMemberCount          int64  `db:"-" json:"channel_member_count"`
-	ChannelMemberTimezonesCount int64  `db:"-" json:"channel_member_timezones_count"`
+	GroupId                     string `json:"group_id"`
+	ChannelMemberCount          int64  `json:"channel_member_count"`
+	ChannelMemberTimezonesCount int64  `json:"channel_member_timezones_count"`
 }
 
-func (o *Channel) DeepCopy() *Channel {
-	copy := *o
-	if copy.SchemeId != nil {
-		copy.SchemeId = NewString(*o.SchemeId)
-	}
-	return &copy
-}
+type ChannelOption func(channel *Channel)
 
-func (o *Channel) ToJson() string {
-	b, _ := json.Marshal(o)
-	return string(b)
-}
-
-func (o *ChannelPatch) ToJson() string {
-	b, _ := json.Marshal(o)
-	return string(b)
+func WithID(ID string) ChannelOption {
+	return func(channel *Channel) {
+		channel.Id = ID
+	}
 }
 
-func (o *ChannelsWithCount) ToJson() []byte {
-	b, _ := json.Marshal(o)
-	return b
-}
+// The following are some GraphQL methods necessary to return the
+// data in float64 type. The spec doesn't support 64 bit integers,
+// so we have to pass the data in float64. The _ at the end is
+// a hack to keep the attribute name same in GraphQL schema.
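// ---- [editor's aside: illustrative sketch, not part of the vendored diff] ----
// Why the *_() accessors defined just below return float64: GraphQL's Int type
// is 32-bit, so millisecond timestamps travel as Float. Converting back is
// lossless while the value stays under 2^53. The channel value is hypothetical.
package main

import (
	"fmt"
	"time"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	ch := &model.Channel{CreateAt: 1640995200000} // 2022-01-01T00:00:00Z in ms

	f := ch.CreateAt_() // float64, as a GraphQL resolver would expose it
	ms := int64(f)      // exact: well below 2^53
	fmt.Println(f, time.UnixMilli(ms).UTC())
}
// ---- [end aside; vendored diff continues] ----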
-func ChannelsWithCountFromJson(data io.Reader) *ChannelsWithCount {
-	var o *ChannelsWithCount
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) CreateAt_() float64 {
+	return float64(o.CreateAt)
 }
 
-func ChannelFromJson(data io.Reader) *Channel {
-	var o *Channel
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) UpdateAt_() float64 {
+	return float64(o.UpdateAt)
 }
 
-func ChannelPatchFromJson(data io.Reader) *ChannelPatch {
-	var o *ChannelPatch
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) DeleteAt_() float64 {
+	return float64(o.DeleteAt)
 }
 
-func ChannelModerationsFromJson(data io.Reader) []*ChannelModeration {
-	var o []*ChannelModeration
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) LastPostAt_() float64 {
+	return float64(o.LastPostAt)
 }
 
-func ChannelModerationsPatchFromJson(data io.Reader) []*ChannelModerationPatch {
-	var o []*ChannelModerationPatch
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) TotalMsgCount_() float64 {
+	return float64(o.TotalMsgCount)
 }
 
-func ChannelMemberCountsByGroupFromJson(data io.Reader) []*ChannelMemberCountByGroup {
-	var o []*ChannelMemberCountByGroup
-	json.NewDecoder(data).Decode(&o)
-	return o
+func (o *Channel) DeepCopy() *Channel {
+	copy := *o
+	if copy.SchemeId != nil {
+		copy.SchemeId = NewString(*o.SchemeId)
+	}
+	return &copy
 }
 
 func (o *Channel) Etag() string {
@@ -216,23 +211,23 @@ func (o *Channel) IsValid() *AppError {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
-	if utf8.RuneCountInString(o.DisplayName) > CHANNEL_DISPLAY_NAME_MAX_RUNES {
+	if utf8.RuneCountInString(o.DisplayName) > ChannelDisplayNameMaxRunes {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
 	if !IsValidChannelIdentifier(o.Name) {
-		return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+		return NewAppError("Channel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
-	if !(o.Type == CHANNEL_OPEN || o.Type == CHANNEL_PRIVATE || o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP) {
+	if !(o.Type == ChannelTypeOpen || o.Type == ChannelTypePrivate || o.Type == ChannelTypeDirect || o.Type == ChannelTypeGroup) {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
-	if utf8.RuneCountInString(o.Header) > CHANNEL_HEADER_MAX_RUNES {
+	if utf8.RuneCountInString(o.Header) > ChannelHeaderMaxRunes {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
-	if utf8.RuneCountInString(o.Purpose) > CHANNEL_PURPOSE_MAX_RUNES {
+	if utf8.RuneCountInString(o.Purpose) > ChannelPurposeMaxRunes {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id, http.StatusBadRequest)
 	}
 
@@ -241,7 +236,7 @@ func (o *Channel) IsValid() *AppError {
 	}
 
 	userIds := strings.Split(o.Name, "__")
-	if o.Type != CHANNEL_DIRECT && len(userIds) == 2 && IsValidId(userIds[0]) && IsValidId(userIds[1]) {
+	if o.Type != ChannelTypeDirect && len(userIds) == 2 && IsValidId(userIds[0]) && IsValidId(userIds[1]) {
 		return NewAppError("Channel.IsValid", "model.channel.is_valid.name.app_error", nil, "", http.StatusBadRequest)
 	}
 
@@ -268,11 +263,11 @@ func (o *Channel) PreUpdate() {
 }
 
 func (o *Channel)
IsGroupOrDirect() bool { - return o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP + return o.Type == ChannelTypeDirect || o.Type == ChannelTypeGroup } func (o *Channel) IsOpen() bool { - return o.Type == CHANNEL_OPEN + return o.Type == ChannelTypeOpen } func (o *Channel) Patch(patch *ChannelPatch) { @@ -313,8 +308,12 @@ func (o *Channel) IsGroupConstrained() bool { return o.GroupConstrained != nil && *o.GroupConstrained } +func (o *Channel) IsShared() bool { + return o.Shared != nil && *o.Shared +} + func (o *Channel) GetOtherUserIdForDM(userId string) string { - if o.Type != CHANNEL_DIRECT { + if o.Type != ChannelTypeDirect { return "" } @@ -333,12 +332,29 @@ func (o *Channel) GetOtherUserIdForDM(userId string) string { return otherUserId } +func (ChannelType) ImplementsGraphQLType(name string) bool { + return name == "ChannelType" +} + +func (t ChannelType) MarshalJSON() ([]byte, error) { + return json.Marshal(string(t)) +} + +func (t *ChannelType) UnmarshalGraphQL(input interface{}) error { + chType, ok := input.(string) + if !ok { + return errors.New("wrong type") + } + + *t = ChannelType(chType) + return nil +} + func GetDMNameFromIds(userId1, userId2 string) string { if userId1 > userId2 { return userId2 + "__" + userId1 - } else { - return userId1 + "__" + userId2 } + return userId1 + "__" + userId2 } func GetGroupDisplayNameFromUsers(users []*User, truncate bool) string { @@ -351,8 +367,8 @@ func GetGroupDisplayNameFromUsers(users []*User, truncate bool) string { name := strings.Join(usernames, ", ") - if truncate && len(name) > CHANNEL_NAME_MAX_LENGTH { - name = name[:CHANNEL_NAME_MAX_LENGTH] + if truncate && len(name) > ChannelNameMaxLength { + name = name[:ChannelNameMaxLength] } return name diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_count.go similarity index 74% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_count.go index 11ddeec4..17cea756 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_count.go @@ -5,20 +5,19 @@ package model import ( "crypto/md5" - "encoding/json" "fmt" - "io" "sort" "strconv" ) type ChannelCounts struct { Counts map[string]int64 `json:"counts"` + CountsRoot map[string]int64 `json:"counts_root"` UpdateTimes map[string]int64 `json:"update_times"` } func (o *ChannelCounts) Etag() string { - + // we don't include CountsRoot in ETag calculation, since it's a derivative ids := []string{} for id := range o.Counts { ids = append(ids, id) @@ -41,14 +40,3 @@ func (o *ChannelCounts) Etag() string { return Etag(md5Counts, update) } - -func (o *ChannelCounts) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelCountsFromJson(data io.Reader) *ChannelCounts { - var o *ChannelCounts - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_data.go similarity index 63% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_data.go index 0a1e0d57..083a3f44 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go +++ 
b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_data.go @@ -3,11 +3,6 @@ package model -import ( - "encoding/json" - "io" -) - type ChannelData struct { Channel *Channel `json:"channel"` Member *ChannelMember `json:"member"` @@ -21,14 +16,3 @@ func (o *ChannelData) Etag() string { return Etag(o.Channel.Id, o.Channel.UpdateAt, o.Channel.LastPostAt, mt) } - -func (o *ChannelData) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func ChannelDataFromJson(data io.Reader) *ChannelData { - var o *ChannelData - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_list.go similarity index 53% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_list.go index b47077ae..7b86b560 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_list.go @@ -3,21 +3,8 @@ package model -import ( - "encoding/json" - "io" -) - type ChannelList []*Channel -func (o *ChannelList) ToJson() string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - func (o *ChannelList) Etag() string { id := "0" @@ -40,28 +27,8 @@ func (o *ChannelList) Etag() string { return Etag(id, t, delta, len(*o)) } -func ChannelListFromJson(data io.Reader) *ChannelList { - var o *ChannelList - json.NewDecoder(data).Decode(&o) - return o -} - -func ChannelSliceFromJson(data io.Reader) []*Channel { - var o []*Channel - json.NewDecoder(data).Decode(&o) - return o -} - type ChannelListWithTeamData []*ChannelWithTeamData -func (o *ChannelListWithTeamData) ToJson() string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - func (o *ChannelListWithTeamData) Etag() string { id := "0" @@ -87,9 +54,3 @@ func (o *ChannelListWithTeamData) Etag() string { return Etag(id, t, delta, len(*o)) } - -func ChannelListWithTeamDataFromJson(data io.Reader) *ChannelListWithTeamData { - var o *ChannelListWithTeamData - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go new file mode 100644 index 00000000..cf26d3ea --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go @@ -0,0 +1,203 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
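// ---- [editor's aside: illustrative sketch, not part of the vendored diff] ----
// With ChannelListFromJson and ChannelSliceFromJson removed above, callers
// decode response bodies themselves, and Type now compares against the typed
// ChannelType constants. The payload here is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	body := `[{"id":"channel-id-placeholder","display_name":"Town Square","type":"O"}]`

	var list model.ChannelList // was model.ChannelListFromJson(reader)
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&list); err != nil {
		panic(err)
	}
	fmt.Println(list[0].DisplayName, list[0].Type == model.ChannelTypeOpen)
}
// ---- [end aside; vendored diff continues] ----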
+ +package model + +import ( + "net/http" + "strings" +) + +const ( + ChannelNotifyDefault = "default" + ChannelNotifyAll = "all" + ChannelNotifyMention = "mention" + ChannelNotifyNone = "none" + ChannelMarkUnreadAll = "all" + ChannelMarkUnreadMention = "mention" + IgnoreChannelMentionsDefault = "default" + IgnoreChannelMentionsOff = "off" + IgnoreChannelMentionsOn = "on" + IgnoreChannelMentionsNotifyProp = "ignore_channel_mentions" +) + +type ChannelUnread struct { + TeamId string `json:"team_id"` + ChannelId string `json:"channel_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + MentionCountRoot int64 `json:"mention_count_root"` + MsgCountRoot int64 `json:"msg_count_root"` + NotifyProps StringMap `json:"-"` +} + +type ChannelUnreadAt struct { + TeamId string `json:"team_id"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + MentionCountRoot int64 `json:"mention_count_root"` + MsgCountRoot int64 `json:"msg_count_root"` + LastViewedAt int64 `json:"last_viewed_at"` + NotifyProps StringMap `json:"-"` +} + +type ChannelMember struct { + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + Roles string `json:"roles"` + LastViewedAt int64 `json:"last_viewed_at"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + MentionCountRoot int64 `json:"mention_count_root"` + MsgCountRoot int64 `json:"msg_count_root"` + NotifyProps StringMap `json:"notify_props"` + LastUpdateAt int64 `json:"last_update_at"` + SchemeGuest bool `json:"scheme_guest"` + SchemeUser bool `json:"scheme_user"` + SchemeAdmin bool `json:"scheme_admin"` + ExplicitRoles string `json:"explicit_roles"` +} + +// The following are some GraphQL methods necessary to return the +// data in float64 type. The spec doesn't support 64 bit integers, +// so we have to pass the data in float64. The _ at the end is +// a hack to keep the attribute name same in GraphQL schema. + +func (o *ChannelMember) LastViewedAt_() float64 { + return float64(o.LastViewedAt) +} + +func (o *ChannelMember) MsgCount_() float64 { + return float64(o.MsgCount) +} + +func (o *ChannelMember) MentionCount_() float64 { + return float64(o.MentionCount) +} + +func (o *ChannelMember) MentionCountRoot_() float64 { + return float64(o.MentionCountRoot) +} + +func (o *ChannelMember) LastUpdateAt_() float64 { + return float64(o.LastUpdateAt) +} + +// ChannelMemberWithTeamData contains ChannelMember appended with extra team information +// as well. 
+type ChannelMemberWithTeamData struct {
+	ChannelMember
+	TeamDisplayName string `json:"team_display_name"`
+	TeamName        string `json:"team_name"`
+	TeamUpdateAt    int64  `json:"team_update_at"`
+}
+
+type ChannelMembers []ChannelMember
+
+type ChannelMembersWithTeamData []ChannelMemberWithTeamData
+
+type ChannelMemberForExport struct {
+	ChannelMember
+	ChannelName string
+	Username    string
+}
+
+func (o *ChannelMember) IsValid() *AppError {
+	if !IsValidId(o.ChannelId) {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if !IsValidId(o.UserId) {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	notifyLevel := o.NotifyProps[DesktopNotifyProp]
+	if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest)
+	}
+
+	markUnreadLevel := o.NotifyProps[MarkUnreadNotifyProp]
+	if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest)
+	}
+
+	if pushLevel, ok := o.NotifyProps[PushNotifyProp]; ok {
+		if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) {
+			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest)
+		}
+	}
+
+	if sendEmail, ok := o.NotifyProps[EmailNotifyProp]; ok {
+		if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) {
+			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest)
+		}
+	}
+
+	if ignoreChannelMentions, ok := o.NotifyProps[IgnoreChannelMentionsNotifyProp]; ok {
+		if len(ignoreChannelMentions) > 40 || !IsIgnoreChannelMentionsValid(ignoreChannelMentions) {
+			return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.ignore_channel_mentions_value.app_error", nil, "ignore_channel_mentions="+ignoreChannelMentions, http.StatusBadRequest)
+		}
+	}
+
+	if len(o.Roles) > UserRolesMaxLength {
+		return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest)
+	}
+
+	return nil
+}
+
+func (o *ChannelMember) PreSave() {
+	o.LastUpdateAt = GetMillis()
+}
+
+func (o *ChannelMember) PreUpdate() {
+	o.LastUpdateAt = GetMillis()
+}
+
+func (o *ChannelMember) GetRoles() []string {
+	return strings.Fields(o.Roles)
+}
+
+func (o *ChannelMember) SetChannelMuted(muted bool) {
+	if muted {
+		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadMention
+	} else {
+		o.NotifyProps[MarkUnreadNotifyProp] = ChannelMarkUnreadAll
+	}
+}
+
+func (o *ChannelMember) IsChannelMuted() bool {
+	return o.NotifyProps[MarkUnreadNotifyProp] == ChannelMarkUnreadMention
+}
+
+func IsChannelNotifyLevelValid(notifyLevel string) bool {
+	return notifyLevel == ChannelNotifyDefault ||
+		notifyLevel == ChannelNotifyAll ||
+		notifyLevel == ChannelNotifyMention ||
+		notifyLevel == ChannelNotifyNone
+}
+
+func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool {
+	return markUnreadLevel == ChannelMarkUnreadAll ||
markUnreadLevel == ChannelMarkUnreadMention +} + +func IsSendEmailValid(sendEmail string) bool { + return sendEmail == ChannelNotifyDefault || sendEmail == "true" || sendEmail == "false" +} + +func IsIgnoreChannelMentionsValid(ignoreChannelMentions string) bool { + return ignoreChannelMentions == IgnoreChannelMentionsOn || ignoreChannelMentions == IgnoreChannelMentionsOff || ignoreChannelMentions == IgnoreChannelMentionsDefault +} + +func GetDefaultChannelNotifyProps() StringMap { + return StringMap{ + DesktopNotifyProp: ChannelNotifyDefault, + MarkUnreadNotifyProp: ChannelMarkUnreadAll, + PushNotifyProp: ChannelNotifyDefault, + EmailNotifyProp: ChannelNotifyDefault, + IgnoreChannelMentionsNotifyProp: IgnoreChannelMentionsDefault, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member_history.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_member_history.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member_history_result.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_member_history_result.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_mentions.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_mentions.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_mentions.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_mentions.go diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_search.go new file mode 100644 index 00000000..530deb20 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_search.go @@ -0,0 +1,22 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
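// ---- [editor's aside: illustrative sketch, not part of the vendored diff] ----
// Putting the ChannelMember pieces above together: a fresh member starts from
// GetDefaultChannelNotifyProps (mark_unread "all", i.e. not muted), and muting
// flips mark_unread to "mention". The IDs are hypothetical.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	m := &model.ChannelMember{
		ChannelId:   model.NewId(),
		UserId:      model.NewId(),
		NotifyProps: model.GetDefaultChannelNotifyProps(),
	}
	fmt.Println(m.IsChannelMuted()) // false: default mark_unread is "all"

	m.SetChannelMuted(true)
	fmt.Println(m.IsChannelMuted()) // true: mark_unread is now "mention"
}
// ---- [end aside; vendored diff continues] ----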
+ +package model + +const ChannelSearchDefaultLimit = 50 + +type ChannelSearch struct { + Term string `json:"term"` + ExcludeDefaultChannels bool `json:"exclude_default_channels"` + NotAssociatedToGroup string `json:"not_associated_to_group"` + TeamIds []string `json:"team_ids"` + GroupConstrained bool `json:"group_constrained"` + ExcludeGroupConstrained bool `json:"exclude_group_constrained"` + ExcludePolicyConstrained bool `json:"exclude_policy_constrained"` + Public bool `json:"public"` + Private bool `json:"private"` + IncludeDeleted bool `json:"include_deleted"` + Deleted bool `json:"deleted"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go similarity index 65% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go index 6a79593c..d5d21112 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go @@ -5,7 +5,8 @@ package model import ( "encoding/json" - "io" + "errors" + "regexp" ) type SidebarCategoryType string @@ -36,15 +37,16 @@ const ( ) // SidebarCategory represents the corresponding DB table -// SortOrder is never returned to the user and only used for queries type SidebarCategory struct { Id string `json:"id"` UserId string `json:"user_id"` TeamId string `json:"team_id"` - SortOrder int64 `json:"-"` + SortOrder int64 `json:"sort_order"` Sorting SidebarCategorySorting `json:"sorting"` Type SidebarCategoryType `json:"type"` DisplayName string `json:"display_name"` + Muted bool `json:"muted"` + Collapsed bool `json:"collapsed"` } // SidebarCategoryWithChannels combines data from SidebarCategory table with the Channel IDs that belong to that category @@ -53,6 +55,10 @@ type SidebarCategoryWithChannels struct { Channels []string `json:"channel_ids"` } +func (sc SidebarCategoryWithChannels) ChannelIds() []string { + return sc.Channels +} + type SidebarCategoryOrder []string // OrderedSidebarCategories combines categories, their channel IDs and an array of Category IDs, sorted @@ -71,41 +77,50 @@ type SidebarChannel struct { type SidebarChannels []*SidebarChannel type SidebarCategoriesWithChannels []*SidebarCategoryWithChannels -func SidebarCategoryFromJson(data io.Reader) (*SidebarCategoryWithChannels, error) { - var o *SidebarCategoryWithChannels - err := json.NewDecoder(data).Decode(&o) - return o, err -} +var categoryIdPattern = regexp.MustCompile("(favorites|channels|direct_messages)_[a-z0-9]{26}_[a-z0-9]{26}") -func SidebarCategoriesFromJson(data io.Reader) ([]*SidebarCategoryWithChannels, error) { - var o []*SidebarCategoryWithChannels - err := json.NewDecoder(data).Decode(&o) - return o, err +func IsValidCategoryId(s string) bool { + // Category IDs can either be regular IDs + if IsValidId(s) { + return true + } + + // Or default categories can follow the pattern {type}_{userID}_{teamID} + return categoryIdPattern.MatchString(s) } -func OrderedSidebarCategoriesFromJson(data io.Reader) (*OrderedSidebarCategories, error) { - var o *OrderedSidebarCategories - err := json.NewDecoder(data).Decode(&o) - return o, err +func (SidebarCategoryType) ImplementsGraphQLType(name string) bool { + return name == "SidebarCategoryType" } -func (o SidebarCategoryWithChannels) 
ToJson() []byte { - b, _ := json.Marshal(o) - return b +func (t SidebarCategoryType) MarshalJSON() ([]byte, error) { + return json.Marshal(string(t)) } -func SidebarCategoriesWithChannelsToJson(o []*SidebarCategoryWithChannels) []byte { - if b, err := json.Marshal(o); err != nil { - return []byte("[]") - } else { - return b +func (t *SidebarCategoryType) UnmarshalGraphQL(input interface{}) error { + chType, ok := input.(string) + if !ok { + return errors.New("wrong type") } + + *t = SidebarCategoryType(chType) + return nil } -func (o OrderedSidebarCategories) ToJson() []byte { - if b, err := json.Marshal(o); err != nil { - return []byte("[]") - } else { - return b +func (SidebarCategorySorting) ImplementsGraphQLType(name string) bool { + return name == "SidebarCategorySorting" +} + +func (t SidebarCategorySorting) MarshalJSON() ([]byte, error) { + return json.Marshal(string(t)) +} + +func (t *SidebarCategorySorting) UnmarshalGraphQL(input interface{}) error { + chType, ok := input.(string) + if !ok { + return errors.New("wrong type") } + + *t = SidebarCategorySorting(chType) + return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_stats.go similarity index 54% rename from vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/channel_stats.go index 76f682aa..96631c11 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_stats.go @@ -3,25 +3,22 @@ package model -import ( - "encoding/json" - "io" -) - type ChannelStats struct { ChannelId string `json:"channel_id"` MemberCount int64 `json:"member_count"` GuestCount int64 `json:"guest_count"` PinnedPostCount int64 `json:"pinnedpost_count"` + FilesCount int64 `json:"files_count"` +} + +func (o *ChannelStats) MemberCount_() float64 { + return float64(o.MemberCount) } -func (o *ChannelStats) ToJson() string { - b, _ := json.Marshal(o) - return string(b) +func (o *ChannelStats) GuestCount_() float64 { + return float64(o.GuestCount) } -func ChannelStatsFromJson(data io.Reader) *ChannelStats { - var o *ChannelStats - json.NewDecoder(data).Decode(&o) - return o +func (o *ChannelStats) PinnedPostCount_() float64 { + return float64(o.PinnedPostCount) } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_view.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_view.go new file mode 100644 index 00000000..c34c438d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_view.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type ChannelView struct { + ChannelId string `json:"channel_id"` + PrevChannelId string `json:"prev_channel_id"` + CollapsedThreadsSupported bool `json:"collapsed_threads_supported"` +} + +type ChannelViewResponse struct { + Status string `json:"status"` + LastViewedAtTimes map[string]int64 `json:"last_viewed_at_times"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go new file mode 100644 index 00000000..c6e7d887 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go @@ -0,0 +1,8032 @@ +// Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + HeaderRequestId = "X-Request-ID" + HeaderVersionId = "X-Version-ID" + HeaderClusterId = "X-Cluster-ID" + HeaderEtagServer = "ETag" + HeaderEtagClient = "If-None-Match" + HeaderForwarded = "X-Forwarded-For" + HeaderRealIP = "X-Real-IP" + HeaderForwardedProto = "X-Forwarded-Proto" + HeaderToken = "token" + HeaderCsrfToken = "X-CSRF-Token" + HeaderBearer = "BEARER" + HeaderAuth = "Authorization" + HeaderCloudToken = "X-Cloud-Token" + HeaderRemoteclusterToken = "X-RemoteCluster-Token" + HeaderRemoteclusterId = "X-RemoteCluster-Id" + HeaderRequestedWith = "X-Requested-With" + HeaderRequestedWithXML = "XMLHttpRequest" + HeaderRange = "Range" + STATUS = "status" + StatusOk = "OK" + StatusFail = "FAIL" + StatusUnhealthy = "UNHEALTHY" + StatusRemove = "REMOVE" + + ClientDir = "client" + + APIURLSuffixV1 = "/api/v1" + APIURLSuffixV4 = "/api/v4" + APIURLSuffixV5 = "/api/v5" + APIURLSuffix = APIURLSuffixV4 +) + +type Response struct { + StatusCode int + RequestId string + Etag string + ServerVersion string + Header http.Header +} + +type Client4 struct { + URL string // The location of the server, for example "http://localhost:8065" + APIURL string // The api location of the server, for example "http://localhost:8065/api/v4" + HTTPClient *http.Client // The http client + AuthToken string + AuthType string + HTTPHeader map[string]string // Headers to be copied over for each request + + // TrueString is the string value sent to the server for true boolean query parameters. + trueString string + + // FalseString is the string value sent to the server for false boolean query parameters. + falseString string +} + +// SetBoolString is a helper method for overriding how true and false query string parameters are +// sent to the server. +// +// This method is only exposed for testing. It is never necessary to configure these values +// in production. +func (c *Client4) SetBoolString(value bool, valueStr string) { + if value { + c.trueString = valueStr + } else { + c.falseString = valueStr + } +} + +// boolString builds the query string parameter for boolean values. 
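// ---- [editor's aside: illustrative sketch, not part of the vendored diff] ----
// Minimal Client4 setup against the constructors defined just below:
// NewAPIv4Client trims any trailing slash and appends /api/v4, and SetToken
// switches the client to Bearer auth. URL and token are hypothetical.
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065/")
	client.SetToken("personal-access-token-placeholder")

	fmt.Println(client.APIURL) // http://localhost:8065/api/v4
}
// ---- [end aside; vendored diff continues] ----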
+func (c *Client4) boolString(value bool) string { + if value && c.trueString != "" { + return c.trueString + } else if !value && c.falseString != "" { + return c.falseString + } + + if value { + return "true" + } + return "false" +} + +func closeBody(r *http.Response) { + if r.Body != nil { + _, _ = io.Copy(ioutil.Discard, r.Body) + _ = r.Body.Close() + } +} + +func NewAPIv4Client(url string) *Client4 { + url = strings.TrimRight(url, "/") + return &Client4{url, url + APIURLSuffix, &http.Client{}, "", "", map[string]string{}, "", ""} +} + +func NewAPIv4SocketClient(socketPath string) *Client4 { + tr := &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return net.Dial("unix", socketPath) + }, + } + + client := NewAPIv4Client("http://_") + client.HTTPClient = &http.Client{Transport: tr} + + return client +} + +func BuildResponse(r *http.Response) *Response { + if r == nil { + return nil + } + + return &Response{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HeaderRequestId), + Etag: r.Header.Get(HeaderEtagServer), + ServerVersion: r.Header.Get(HeaderVersionId), + Header: r.Header, + } +} + +func (c *Client4) SetToken(token string) { + c.AuthToken = token + c.AuthType = HeaderBearer +} + +// MockSession is deprecated in favour of SetToken +func (c *Client4) MockSession(token string) { + c.SetToken(token) +} + +func (c *Client4) SetOAuthToken(token string) { + c.AuthToken = token + c.AuthType = HeaderToken +} + +func (c *Client4) ClearOAuthToken() { + c.AuthToken = "" + c.AuthType = HeaderBearer +} + +func (c *Client4) usersRoute() string { + return "/users" +} + +func (c *Client4) userRoute(userId string) string { + return fmt.Sprintf(c.usersRoute()+"/%v", userId) +} + +func (c *Client4) userThreadsRoute(userID, teamID string) string { + return c.userRoute(userID) + c.teamRoute(teamID) + "/threads" +} + +func (c *Client4) userThreadRoute(userId, teamId, threadId string) string { + return c.userThreadsRoute(userId, teamId) + "/" + threadId +} + +func (c *Client4) userCategoryRoute(userID, teamID string) string { + return c.userRoute(userID) + c.teamRoute(teamID) + "/channels/categories" +} + +func (c *Client4) userAccessTokensRoute() string { + return fmt.Sprintf(c.usersRoute() + "/tokens") +} + +func (c *Client4) userAccessTokenRoute(tokenId string) string { + return fmt.Sprintf(c.usersRoute()+"/tokens/%v", tokenId) +} + +func (c *Client4) userByUsernameRoute(userName string) string { + return fmt.Sprintf(c.usersRoute()+"/username/%v", userName) +} + +func (c *Client4) userByEmailRoute(email string) string { + return fmt.Sprintf(c.usersRoute()+"/email/%v", email) +} + +func (c *Client4) botsRoute() string { + return "/bots" +} + +func (c *Client4) botRoute(botUserId string) string { + return fmt.Sprintf("%s/%s", c.botsRoute(), botUserId) +} + +func (c *Client4) teamsRoute() string { + return "/teams" +} + +func (c *Client4) teamRoute(teamId string) string { + return fmt.Sprintf(c.teamsRoute()+"/%v", teamId) +} + +func (c *Client4) teamAutoCompleteCommandsRoute(teamId string) string { + return fmt.Sprintf(c.teamsRoute()+"/%v/commands/autocomplete", teamId) +} + +func (c *Client4) teamByNameRoute(teamName string) string { + return fmt.Sprintf(c.teamsRoute()+"/name/%v", teamName) +} + +func (c *Client4) teamMemberRoute(teamId, userId string) string { + return fmt.Sprintf(c.teamRoute(teamId)+"/members/%v", userId) +} + +func (c *Client4) teamMembersRoute(teamId string) string { + return fmt.Sprintf(c.teamRoute(teamId) + "/members") +} + +func (c *Client4) 
teamStatsRoute(teamId string) string { + return fmt.Sprintf(c.teamRoute(teamId) + "/stats") +} + +func (c *Client4) teamImportRoute(teamId string) string { + return fmt.Sprintf(c.teamRoute(teamId) + "/import") +} + +func (c *Client4) channelsRoute() string { + return "/channels" +} + +func (c *Client4) channelsForTeamRoute(teamId string) string { + return fmt.Sprintf(c.teamRoute(teamId) + "/channels") +} + +func (c *Client4) channelRoute(channelId string) string { + return fmt.Sprintf(c.channelsRoute()+"/%v", channelId) +} + +func (c *Client4) channelByNameRoute(channelName, teamId string) string { + return fmt.Sprintf(c.teamRoute(teamId)+"/channels/name/%v", channelName) +} + +func (c *Client4) channelsForTeamForUserRoute(teamId, userId string, includeDeleted bool) string { + route := fmt.Sprintf(c.userRoute(userId) + c.teamRoute(teamId) + "/channels") + if includeDeleted { + query := fmt.Sprintf("?include_deleted=%v", includeDeleted) + return route + query + } + return route +} + +func (c *Client4) channelByNameForTeamNameRoute(channelName, teamName string) string { + return fmt.Sprintf(c.teamByNameRoute(teamName)+"/channels/name/%v", channelName) +} + +func (c *Client4) channelMembersRoute(channelId string) string { + return fmt.Sprintf(c.channelRoute(channelId) + "/members") +} + +func (c *Client4) channelMemberRoute(channelId, userId string) string { + return fmt.Sprintf(c.channelMembersRoute(channelId)+"/%v", userId) +} + +func (c *Client4) postsRoute() string { + return "/posts" +} + +func (c *Client4) postsEphemeralRoute() string { + return "/posts/ephemeral" +} + +func (c *Client4) configRoute() string { + return "/config" +} + +func (c *Client4) licenseRoute() string { + return "/license" +} + +func (c *Client4) postRoute(postId string) string { + return fmt.Sprintf(c.postsRoute()+"/%v", postId) +} + +func (c *Client4) filesRoute() string { + return "/files" +} + +func (c *Client4) fileRoute(fileId string) string { + return fmt.Sprintf(c.filesRoute()+"/%v", fileId) +} + +func (c *Client4) uploadsRoute() string { + return "/uploads" +} + +func (c *Client4) uploadRoute(uploadId string) string { + return fmt.Sprintf("%s/%s", c.uploadsRoute(), uploadId) +} + +func (c *Client4) pluginsRoute() string { + return "/plugins" +} + +func (c *Client4) pluginRoute(pluginId string) string { + return fmt.Sprintf(c.pluginsRoute()+"/%v", pluginId) +} + +func (c *Client4) systemRoute() string { + return "/system" +} + +func (c *Client4) cloudRoute() string { + return "/cloud" +} + +func (c *Client4) testEmailRoute() string { + return "/email/test" +} + +func (c *Client4) testSiteURLRoute() string { + return "/site_url/test" +} + +func (c *Client4) testS3Route() string { + return "/file/s3_test" +} + +func (c *Client4) databaseRoute() string { + return "/database" +} + +func (c *Client4) cacheRoute() string { + return "/caches" +} + +func (c *Client4) clusterRoute() string { + return "/cluster" +} + +func (c *Client4) incomingWebhooksRoute() string { + return "/hooks/incoming" +} + +func (c *Client4) incomingWebhookRoute(hookID string) string { + return fmt.Sprintf(c.incomingWebhooksRoute()+"/%v", hookID) +} + +func (c *Client4) complianceReportsRoute() string { + return "/compliance/reports" +} + +func (c *Client4) complianceReportRoute(reportId string) string { + return fmt.Sprintf("%s/%s", c.complianceReportsRoute(), reportId) +} + +func (c *Client4) complianceReportDownloadRoute(reportId string) string { + return fmt.Sprintf("%s/%s/download", c.complianceReportsRoute(), reportId) +} + +func (c 
*Client4) outgoingWebhooksRoute() string { + return "/hooks/outgoing" +} + +func (c *Client4) outgoingWebhookRoute(hookID string) string { + return fmt.Sprintf(c.outgoingWebhooksRoute()+"/%v", hookID) +} + +func (c *Client4) preferencesRoute(userId string) string { + return fmt.Sprintf(c.userRoute(userId) + "/preferences") +} + +func (c *Client4) userStatusRoute(userId string) string { + return fmt.Sprintf(c.userRoute(userId) + "/status") +} + +func (c *Client4) userStatusesRoute() string { + return fmt.Sprintf(c.usersRoute() + "/status") +} + +func (c *Client4) samlRoute() string { + return "/saml" +} + +func (c *Client4) ldapRoute() string { + return "/ldap" +} + +func (c *Client4) brandRoute() string { + return "/brand" +} + +func (c *Client4) dataRetentionRoute() string { + return "/data_retention" +} + +func (c *Client4) dataRetentionPolicyRoute(policyID string) string { + return fmt.Sprintf(c.dataRetentionRoute()+"/policies/%v", policyID) +} + +func (c *Client4) elasticsearchRoute() string { + return "/elasticsearch" +} + +func (c *Client4) bleveRoute() string { + return "/bleve" +} + +func (c *Client4) commandsRoute() string { + return "/commands" +} + +func (c *Client4) commandRoute(commandId string) string { + return fmt.Sprintf(c.commandsRoute()+"/%v", commandId) +} + +func (c *Client4) commandMoveRoute(commandId string) string { + return fmt.Sprintf(c.commandsRoute()+"/%v/move", commandId) +} + +func (c *Client4) emojisRoute() string { + return "/emoji" +} + +func (c *Client4) emojiRoute(emojiId string) string { + return fmt.Sprintf(c.emojisRoute()+"/%v", emojiId) +} + +func (c *Client4) emojiByNameRoute(name string) string { + return fmt.Sprintf(c.emojisRoute()+"/name/%v", name) +} + +func (c *Client4) reactionsRoute() string { + return "/reactions" +} + +func (c *Client4) oAuthAppsRoute() string { + return "/oauth/apps" +} + +func (c *Client4) oAuthAppRoute(appId string) string { + return fmt.Sprintf("/oauth/apps/%v", appId) +} + +func (c *Client4) openGraphRoute() string { + return "/opengraph" +} + +func (c *Client4) jobsRoute() string { + return "/jobs" +} + +func (c *Client4) rolesRoute() string { + return "/roles" +} + +func (c *Client4) schemesRoute() string { + return "/schemes" +} + +func (c *Client4) schemeRoute(id string) string { + return c.schemesRoute() + fmt.Sprintf("/%v", id) +} + +func (c *Client4) analyticsRoute() string { + return "/analytics" +} + +func (c *Client4) timezonesRoute() string { + return fmt.Sprintf(c.systemRoute() + "/timezones") +} + +func (c *Client4) channelSchemeRoute(channelId string) string { + return fmt.Sprintf(c.channelsRoute()+"/%v/scheme", channelId) +} + +func (c *Client4) teamSchemeRoute(teamId string) string { + return fmt.Sprintf(c.teamsRoute()+"/%v/scheme", teamId) +} + +func (c *Client4) totalUsersStatsRoute() string { + return fmt.Sprintf(c.usersRoute() + "/stats") +} + +func (c *Client4) redirectLocationRoute() string { + return "/redirect_location" +} + +func (c *Client4) serverBusyRoute() string { + return "/server_busy" +} + +func (c *Client4) userTermsOfServiceRoute(userId string) string { + return c.userRoute(userId) + "/terms_of_service" +} + +func (c *Client4) termsOfServiceRoute() string { + return "/terms_of_service" +} + +func (c *Client4) groupsRoute() string { + return "/groups" +} + +func (c *Client4) publishUserTypingRoute(userId string) string { + return c.userRoute(userId) + "/typing" +} + +func (c *Client4) groupRoute(groupID string) string { + return fmt.Sprintf("%s/%s", c.groupsRoute(), groupID) +} + +func 
(c *Client4) groupSyncableRoute(groupID, syncableID string, syncableType GroupSyncableType) string { + return fmt.Sprintf("%s/%ss/%s", c.groupRoute(groupID), strings.ToLower(syncableType.String()), syncableID) +} + +func (c *Client4) groupSyncablesRoute(groupID string, syncableType GroupSyncableType) string { + return fmt.Sprintf("%s/%ss", c.groupRoute(groupID), strings.ToLower(syncableType.String())) +} + +func (c *Client4) importsRoute() string { + return "/imports" +} + +func (c *Client4) exportsRoute() string { + return "/exports" +} + +func (c *Client4) exportRoute(name string) string { + return fmt.Sprintf(c.exportsRoute()+"/%v", name) +} + +func (c *Client4) sharedChannelsRoute() string { + return "/sharedchannels" +} + +func (c *Client4) permissionsRoute() string { + return "/permissions" +} + +func (c *Client4) DoAPIGet(url string, etag string) (*http.Response, error) { + return c.DoAPIRequest(http.MethodGet, c.APIURL+url, "", etag) +} + +func (c *Client4) DoAPIPost(url string, data string) (*http.Response, error) { + return c.DoAPIRequest(http.MethodPost, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIDeleteBytes(url string, data []byte) (*http.Response, error) { + return c.DoAPIRequestBytes(http.MethodDelete, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIPatchBytes(url string, data []byte) (*http.Response, error) { + return c.DoAPIRequestBytes(http.MethodPatch, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIPostBytes(url string, data []byte) (*http.Response, error) { + return c.DoAPIRequestBytes(http.MethodPost, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIPut(url string, data string) (*http.Response, error) { + return c.DoAPIRequest(http.MethodPut, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIPutBytes(url string, data []byte) (*http.Response, error) { + return c.DoAPIRequestBytes(http.MethodPut, c.APIURL+url, data, "") +} + +func (c *Client4) DoAPIDelete(url string) (*http.Response, error) { + return c.DoAPIRequest(http.MethodDelete, c.APIURL+url, "", "") +} + +func (c *Client4) DoAPIRequest(method, url, data, etag string) (*http.Response, error) { + return c.DoAPIRequestReader(method, url, strings.NewReader(data), map[string]string{HeaderEtagClient: etag}) +} + +func (c *Client4) DoAPIRequestWithHeaders(method, url, data string, headers map[string]string) (*http.Response, error) { + return c.DoAPIRequestReader(method, url, strings.NewReader(data), headers) +} + +func (c *Client4) DoAPIRequestBytes(method, url string, data []byte, etag string) (*http.Response, error) { + return c.DoAPIRequestReader(method, url, bytes.NewReader(data), map[string]string{HeaderEtagClient: etag}) +} + +func (c *Client4) DoAPIRequestReader(method, url string, data io.Reader, headers map[string]string) (*http.Response, error) { + rq, err := http.NewRequest(method, url, data) + if err != nil { + return nil, err + } + + for k, v := range headers { + rq.Header.Set(k, v) + } + + if c.AuthToken != "" { + rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken) + } + + if c.HTTPHeader != nil && len(c.HTTPHeader) > 0 { + for k, v := range c.HTTPHeader { + rq.Header.Set(k, v) + } + } + + rp, err := c.HTTPClient.Do(rq) + if err != nil { + return rp, err + } + + if rp.StatusCode == 304 { + return rp, nil + } + + if rp.StatusCode >= 300 { + defer closeBody(rp) + return rp, AppErrorFromJSON(rp.Body) + } + + return rp, nil +} + +func (c *Client4) DoUploadFile(url string, data []byte, contentType string) (*FileUploadResponse, *Response, error) { + return c.doUploadFile(url, 
bytes.NewReader(data), contentType, 0) +} + +func (c *Client4) doUploadFile(url string, body io.Reader, contentType string, contentLength int64) (*FileUploadResponse, *Response, error) { + rq, err := http.NewRequest("POST", c.APIURL+url, body) + if err != nil { + return nil, nil, err + } + if contentLength != 0 { + rq.ContentLength = contentLength + } + rq.Header.Set("Content-Type", contentType) + + if c.AuthToken != "" { + rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken) + } + + rp, err := c.HTTPClient.Do(rq) + if err != nil { + return nil, BuildResponse(rp), err + } + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body) + } + + var res FileUploadResponse + if jsonErr := json.NewDecoder(rp.Body).Decode(&res); jsonErr != nil { + return nil, nil, NewAppError("doUploadFile", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &res, BuildResponse(rp), nil +} + +func (c *Client4) DoEmojiUploadFile(url string, data []byte, contentType string) (*Emoji, *Response, error) { + rq, err := http.NewRequest("POST", c.APIURL+url, bytes.NewReader(data)) + if err != nil { + return nil, nil, err + } + rq.Header.Set("Content-Type", contentType) + + if c.AuthToken != "" { + rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken) + } + + rp, err := c.HTTPClient.Do(rq) + if err != nil { + return nil, BuildResponse(rp), err + } + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body) + } + + var e Emoji + if jsonErr := json.NewDecoder(rp.Body).Decode(&e); jsonErr != nil { + return nil, nil, NewAppError("DoEmojiUploadFile", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &e, BuildResponse(rp), nil +} + +func (c *Client4) DoUploadImportTeam(url string, data []byte, contentType string) (map[string]string, *Response, error) { + rq, err := http.NewRequest("POST", c.APIURL+url, bytes.NewReader(data)) + if err != nil { + return nil, nil, err + } + rq.Header.Set("Content-Type", contentType) + + if c.AuthToken != "" { + rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken) + } + + rp, err := c.HTTPClient.Do(rq) + if err != nil { + return nil, BuildResponse(rp), err + } + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body) + } + + return MapFromJSON(rp.Body), BuildResponse(rp), nil +} + +// Authentication Section + +// LoginById authenticates a user by user id and password. +func (c *Client4) LoginById(id string, password string) (*User, *Response, error) { + m := make(map[string]string) + m["id"] = id + m["password"] = password + return c.login(m) +} + +// Login authenticates a user by login id, which can be username, email or some sort +// of SSO identifier based on server configuration, and a password. +func (c *Client4) Login(loginId string, password string) (*User, *Response, error) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + return c.login(m) +} + +// LoginByLdap authenticates a user by LDAP id and password. +func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response, error) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["ldap_only"] = c.boolString(true) + return c.login(m) +} + +// LoginWithDevice authenticates a user by login id (username, email or some sort +// of SSO identifier based on configuration), password and attaches a device id to +// the session. 
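// ---- [editor's aside: illustrative sketch, not part of the vendored diff] ----
// The Login* helpers in this block all funnel into login(), which stores the
// session token from the response's "token" header on the client, so later
// calls authenticate automatically. The credentials are hypothetical.
package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client := model.NewAPIv4Client("http://localhost:8065")

	user, _, err := client.Login("admin@example.com", "example-password")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(user.Id, client.AuthToken != "") // AuthToken is now populated
}
// ---- [end aside; vendored diff continues] ----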
+func (c *Client4) LoginWithDevice(loginId string, password string, deviceId string) (*User, *Response, error) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["device_id"] = deviceId + return c.login(m) +} + +// LoginWithMFA logs a user in with a MFA token +func (c *Client4) LoginWithMFA(loginId, password, mfaToken string) (*User, *Response, error) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["token"] = mfaToken + return c.login(m) +} + +func (c *Client4) login(m map[string]string) (*User, *Response, error) { + r, err := c.DoAPIPost("/users/login", MapToJSON(m)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + c.AuthToken = r.Header.Get(HeaderToken) + c.AuthType = HeaderBearer + + var user User + if jsonErr := json.NewDecoder(r.Body).Decode(&user); jsonErr != nil { + return nil, nil, NewAppError("login", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &user, BuildResponse(r), nil +} + +// Logout terminates the current user's session. +func (c *Client4) Logout() (*Response, error) { + r, err := c.DoAPIPost("/users/logout", "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + c.AuthToken = "" + c.AuthType = HeaderBearer + return BuildResponse(r), nil +} + +// SwitchAccountType changes a user's login type from one type to another. +func (c *Client4) SwitchAccountType(switchRequest *SwitchRequest) (string, *Response, error) { + buf, err := json.Marshal(switchRequest) + if err != nil { + return "", BuildResponse(nil), NewAppError("SwitchAccountType", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.usersRoute()+"/login/switch", buf) + if err != nil { + return "", BuildResponse(r), err + } + defer closeBody(r) + return MapFromJSON(r.Body)["follow_link"], BuildResponse(r), nil +} + +// User Section + +// CreateUser creates a user in the system based on the provided user struct. +func (c *Client4) CreateUser(user *User) (*User, *Response, error) { + userJSON, jsonErr := json.Marshal(user) + if jsonErr != nil { + return nil, nil, NewAppError("CreateUser", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + + r, err := c.DoAPIPost(c.usersRoute(), string(userJSON)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var u User + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("CreateUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// CreateUserWithToken creates a user in the system based on the provided tokenId. 
+func (c *Client4) CreateUserWithToken(user *User, tokenId string) (*User, *Response, error) { + if tokenId == "" { + return nil, nil, NewAppError("MissingHashOrData", "api.user.create_user.missing_token.app_error", nil, "", http.StatusBadRequest) + } + + query := fmt.Sprintf("?t=%v", tokenId) + buf, err := json.Marshal(user) + if err != nil { + return nil, nil, NewAppError("CreateUserWithToken", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.usersRoute()+query, buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var u User + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("CreateUserWithToken", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// CreateUserWithInviteId creates a user in the system based on the provided invited id. +func (c *Client4) CreateUserWithInviteId(user *User, inviteId string) (*User, *Response, error) { + if inviteId == "" { + return nil, nil, NewAppError("MissingInviteId", "api.user.create_user.missing_invite_id.app_error", nil, "", http.StatusBadRequest) + } + + query := fmt.Sprintf("?iid=%v", url.QueryEscape(inviteId)) + buf, err := json.Marshal(user) + if err != nil { + return nil, nil, NewAppError("CreateUserWithInviteId", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.usersRoute()+query, buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var u User + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("CreateUserWithInviteId", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// GetMe returns the logged in user. +func (c *Client4) GetMe(etag string) (*User, *Response, error) { + r, err := c.DoAPIGet(c.userRoute(Me), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var u User + if r.StatusCode == http.StatusNotModified { + return &u, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("GetMe", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// GetUser returns a user based on the provided user id string. +func (c *Client4) GetUser(userId, etag string) (*User, *Response, error) { + r, err := c.DoAPIGet(c.userRoute(userId), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var u User + if r.StatusCode == http.StatusNotModified { + return &u, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("GetUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// GetUserByUsername returns a user based on the provided user name string. 
+func (c *Client4) GetUserByUsername(userName, etag string) (*User, *Response, error) {
+	r, err := c.DoAPIGet(c.userByUsernameRoute(userName), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if r.StatusCode == http.StatusNotModified {
+		return &u, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserByUsername", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// GetUserByEmail returns a user based on the provided user email string.
+func (c *Client4) GetUserByEmail(email, etag string) (*User, *Response, error) {
+	r, err := c.DoAPIGet(c.userByEmailRoute(email), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if r.StatusCode == http.StatusNotModified {
+		return &u, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserByEmail", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// AutocompleteUsersInTeam returns the users on a team based on a search term.
+func (c *Client4) AutocompleteUsersInTeam(teamId string, username string, limit int, etag string) (*UserAutocomplete, *Response, error) {
+	query := fmt.Sprintf("?in_team=%v&name=%v&limit=%d", teamId, username, limit)
+	r, err := c.DoAPIGet(c.usersRoute()+"/autocomplete"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u UserAutocomplete
+	if r.StatusCode == http.StatusNotModified {
+		return &u, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("AutocompleteUsersInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// AutocompleteUsersInChannel returns the users in a channel based on a search term.
+func (c *Client4) AutocompleteUsersInChannel(teamId string, channelId string, username string, limit int, etag string) (*UserAutocomplete, *Response, error) {
+	query := fmt.Sprintf("?in_team=%v&in_channel=%v&name=%v&limit=%d", teamId, channelId, username, limit)
+	r, err := c.DoAPIGet(c.usersRoute()+"/autocomplete"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u UserAutocomplete
+	if r.StatusCode == http.StatusNotModified {
+		return &u, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("AutocompleteUsersInChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// AutocompleteUsers returns the users in the system based on a search term.
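+//
+// Example (illustrative sketch; assumes an authenticated client, and that the
+// UserAutocomplete result exposes a Users slice as defined in this package):
+//
+//	res, _, err := client.AutocompleteUsers("jo", 25, "")
+//	if err == nil {
+//		for _, u := range res.Users {
+//			fmt.Println(u.Username)
+//		}
+//	}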
+func (c *Client4) AutocompleteUsers(username string, limit int, etag string) (*UserAutocomplete, *Response, error) {
+	query := fmt.Sprintf("?name=%v&limit=%d", username, limit)
+	r, err := c.DoAPIGet(c.usersRoute()+"/autocomplete"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u UserAutocomplete
+	if r.StatusCode == http.StatusNotModified {
+		return &u, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("AutocompleteUsers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// GetDefaultProfileImage gets the user's default profile image. Must be logged in.
+func (c *Client4) GetDefaultProfileImage(userId string) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/image/default", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetDefaultProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+
+	return data, BuildResponse(r), nil
+}
+
+// GetProfileImage gets a user's profile image. Must be logged in.
+func (c *Client4) GetProfileImage(userId, etag string) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/image", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// GetUsers returns a page of users on the system. Page counting starts at 0.
+func (c *Client4) GetUsers(page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersInTeam returns a page of users on a team. Page counting starts at 0.
+func (c *Client4) GetUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetNewUsersInTeam returns a page of users on a team, sorted by creation time. Page counting starts at 0.
+func (c *Client4) GetNewUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?sort=create_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetNewUsersInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetRecentlyActiveUsersInTeam returns a page of users on a team, sorted by last activity. Page counting starts at 0.
+func (c *Client4) GetRecentlyActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?sort=last_activity_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetRecentlyActiveUsersInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetActiveUsersInTeam returns a page of active users on a team. Page counting starts at 0.
+func (c *Client4) GetActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?active=true&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetActiveUsersInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersNotInTeam returns a page of users who are not in a team. Page counting starts at 0.
+func (c *Client4) GetUsersNotInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?not_in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersNotInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersInChannel returns a page of users in a channel. Page counting starts at 0.
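+//
+// Example (illustrative sketch; channelId is a placeholder and 60 is an
+// arbitrary page size):
+//
+//	users, _, err := client.GetUsersInChannel(channelId, 0, 60, "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(len(users))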
+func (c *Client4) GetUsersInChannel(channelId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v", channelId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersInChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersInChannelByStatus returns a page of users in a channel. Page counting starts at 0. Sorted by status.
+func (c *Client4) GetUsersInChannelByStatus(channelId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v&sort=status", channelId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersInChannelByStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersNotInChannel returns a page of users not in a channel. Page counting starts at 0.
+func (c *Client4) GetUsersNotInChannel(teamId, channelId string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?in_team=%v&not_in_channel=%v&page=%v&per_page=%v", teamId, channelId, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersNotInChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersWithoutTeam returns a page of users on the system that aren't on any teams. Page counting starts at 0.
+func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?without_team=1&page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersWithoutTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersInGroup returns a page of users in a group. Page counting starts at 0.
+func (c *Client4) GetUsersInGroup(groupID string, page int, perPage int, etag string) ([]*User, *Response, error) {
+	query := fmt.Sprintf("?in_group=%v&page=%v&per_page=%v", groupID, page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersInGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersByIds returns a list of users based on the provided user ids.
+func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/ids", ArrayToJSON(userIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersByIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersByIdsWithOptions returns a list of users based on the provided user ids,
+// applying the additional options such as filtering by last update time.
+func (c *Client4) GetUsersByIdsWithOptions(userIds []string, options *UserGetByIdsOptions) ([]*User, *Response, error) {
+	v := url.Values{}
+	if options.Since != 0 {
+		v.Set("since", fmt.Sprintf("%d", options.Since))
+	}
+
+	route := c.usersRoute() + "/ids"
+	if len(v) > 0 {
+		route += "?" + v.Encode()
+	}
+
+	r, err := c.DoAPIPost(route, ArrayToJSON(userIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersByIdsWithOptions", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersByUsernames returns a list of users based on the provided usernames.
+func (c *Client4) GetUsersByUsernames(usernames []string) ([]*User, *Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/usernames", ArrayToJSON(usernames))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersByUsernames", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUsersByGroupChannelIds returns a map with channel ids as keys
+// and a list of users as values based on the provided user ids.
+func (c *Client4) GetUsersByGroupChannelIds(groupChannelIds []string) (map[string][]*User, *Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/group_channels", ArrayToJSON(groupChannelIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	usersByChannelId := map[string][]*User{}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&usersByChannelId); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersByGroupChannelIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return usersByChannelId, BuildResponse(r), nil
+}
+
+// SearchUsers returns a list of users based on some search criteria.
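+//
+// Example (illustrative sketch; the Term and TeamId fields follow the
+// UserSearch struct in this package, and teamId is a placeholder):
+//
+//	users, _, err := client.SearchUsers(&UserSearch{Term: "jo", TeamId: teamId})
+//	if err != nil {
+//		// handle the error
+//	}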
+func (c *Client4) SearchUsers(search *UserSearch) ([]*User, *Response, error) {
+	buf, err := json.Marshal(search)
+	if err != nil {
+		return nil, nil, NewAppError("SearchUsers", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.usersRoute()+"/search", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchUsers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// UpdateUser updates a user in the system based on the provided user struct.
+func (c *Client4) UpdateUser(user *User) (*User, *Response, error) {
+	buf, err := json.Marshal(user)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.userRoute(user.Id), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// PatchUser partially updates a user in the system. Any missing fields are not updated.
+func (c *Client4) PatchUser(userId string, patch *UserPatch) (*User, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.userRoute(userId)+"/patch", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("PatchUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// UpdateUserAuth updates a user's AuthData (authData, authService and password) in the system.
+func (c *Client4) UpdateUserAuth(userId string, userAuth *UserAuth) (*UserAuth, *Response, error) {
+	buf, err := json.Marshal(userAuth)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateUserAuth", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.userRoute(userId)+"/auth", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ua UserAuth
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ua); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateUserAuth", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ua, BuildResponse(r), nil
+}
+
+// UpdateUserMfa activates multi-factor authentication for a user if activate
+// is true and a valid code is provided. If activate is false, then code is not
+// required and multi-factor authentication is disabled for the user.
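+//
+// Example (illustrative sketch; the code value would come from the user's
+// authenticator app, and userId is a placeholder):
+//
+//	_, err := client.UpdateUserMfa(userId, "123456", true) // enable MFA
+//	if err != nil {
+//		// handle the error
+//	}
+//	_, err = client.UpdateUserMfa(userId, "", false) // disable MFA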
+func (c *Client4) UpdateUserMfa(userId, code string, activate bool) (*Response, error) {
+	requestBody := make(map[string]interface{})
+	requestBody["activate"] = activate
+	requestBody["code"] = code
+
+	r, err := c.DoAPIPut(c.userRoute(userId)+"/mfa", StringInterfaceToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GenerateMfaSecret will generate a new MFA secret for a user and return it as a string and
+// as a base64 encoded image QR code.
+func (c *Client4) GenerateMfaSecret(userId string) (*MfaSecret, *Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/mfa/generate", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var secret MfaSecret
+	if jsonErr := json.NewDecoder(r.Body).Decode(&secret); jsonErr != nil {
+		return nil, nil, NewAppError("GenerateMfaSecret", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &secret, BuildResponse(r), nil
+}
+
+// UpdateUserPassword updates a user's password. Must be logged in as the user or be a system administrator.
+func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string) (*Response, error) {
+	requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword}
+	r, err := c.DoAPIPut(c.userRoute(userId)+"/password", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateUserHashedPassword updates a user's password with an already-hashed password. Must be a system administrator.
+func (c *Client4) UpdateUserHashedPassword(userId, newHashedPassword string) (*Response, error) {
+	requestBody := map[string]string{"already_hashed": "true", "new_password": newHashedPassword}
+	r, err := c.DoAPIPut(c.userRoute(userId)+"/password", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PromoteGuestToUser converts a guest into a regular user
+func (c *Client4) PromoteGuestToUser(guestId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(guestId)+"/promote", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DemoteUserToGuest converts a regular user into a guest
+func (c *Client4) DemoteUserToGuest(guestId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(guestId)+"/demote", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateUserRoles updates a user's roles in the system. A user can have "system_user" and "system_admin" roles.
+func (c *Client4) UpdateUserRoles(userId, roles string) (*Response, error) {
+	requestBody := map[string]string{"roles": roles}
+	r, err := c.DoAPIPut(c.userRoute(userId)+"/roles", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateUserActive updates the active status of a user.
+func (c *Client4) UpdateUserActive(userId string, active bool) (*Response, error) {
+	requestBody := make(map[string]interface{})
+	requestBody["active"] = active
+	r, err := c.DoAPIPut(c.userRoute(userId)+"/active", StringInterfaceToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	return BuildResponse(r), nil
+}
+
+// DeleteUser deactivates a user in the system based on the provided user id string.
+func (c *Client4) DeleteUser(userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userRoute(userId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PermanentDeleteUser deletes a user in the system based on the provided user id string.
+func (c *Client4) PermanentDeleteUser(userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userRoute(userId) + "?permanent=" + c.boolString(true))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// ConvertUserToBot converts a user to a bot user.
+func (c *Client4) ConvertUserToBot(userId string) (*Bot, *Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/convert_to_bot", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("ConvertUserToBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return bot, BuildResponse(r), nil
+}
+
+// ConvertBotToUser converts a bot user to a user.
+func (c *Client4) ConvertBotToUser(userId string, userPatch *UserPatch, setSystemAdmin bool) (*User, *Response, error) {
+	var query string
+	if setSystemAdmin {
+		query = "?set_system_admin=true"
+	}
+	buf, err := json.Marshal(userPatch)
+	if err != nil {
+		return nil, nil, NewAppError("ConvertBotToUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.botRoute(userId)+"/convert_to_user"+query, buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("ConvertBotToUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// PermanentDeleteAllUsers permanently deletes all users in the system. This is a local-only endpoint
+func (c *Client4) PermanentDeleteAllUsers() (*Response, error) {
+	r, err := c.DoAPIDelete(c.usersRoute())
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// SendPasswordResetEmail will send a link for password resetting to a user with the
+// provided email.
+func (c *Client4) SendPasswordResetEmail(email string) (*Response, error) {
+	requestBody := map[string]string{"email": email}
+	r, err := c.DoAPIPost(c.usersRoute()+"/password/reset/send", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// ResetPassword uses a recovery code to reset a user's password.
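+//
+// Example (illustrative sketch; resetToken is a placeholder for the token
+// delivered by the password-reset email):
+//
+//	_, err := client.ResetPassword(resetToken, "NewPassword1!")
+//	if err != nil {
+//		// handle the error
+//	}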
+func (c *Client4) ResetPassword(token, newPassword string) (*Response, error) {
+	requestBody := map[string]string{"token": token, "new_password": newPassword}
+	r, err := c.DoAPIPost(c.usersRoute()+"/password/reset", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetSessions returns a list of sessions based on the provided user id string.
+func (c *Client4) GetSessions(userId, etag string) ([]*Session, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/sessions", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Session
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetSessions", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// RevokeSession revokes a user session based on the provided user id and session id strings.
+func (c *Client4) RevokeSession(userId, sessionId string) (*Response, error) {
+	requestBody := map[string]string{"session_id": sessionId}
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/sessions/revoke", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// RevokeAllSessions revokes all sessions for the provided user id string.
+func (c *Client4) RevokeAllSessions(userId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/sessions/revoke/all", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// RevokeSessionsFromAllUsers revokes all sessions for all users.
+func (c *Client4) RevokeSessionsFromAllUsers() (*Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/sessions/revoke/all", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// AttachDeviceId attaches a mobile device ID to the current session.
+func (c *Client4) AttachDeviceId(deviceId string) (*Response, error) {
+	requestBody := map[string]string{"device_id": deviceId}
+	r, err := c.DoAPIPut(c.usersRoute()+"/sessions/device", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetTeamsUnreadForUser will return an array with TeamUnread objects that contain the amount
+// of unread messages and mentions the current user has for the teams it belongs to.
+// An optional team ID can be set to exclude that team from the results.
+// An optional boolean can be set to include collapsed thread unreads. Must be authenticated.
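+//
+// Example (illustrative sketch; passing "" excludes no team, and the field
+// names follow the TeamUnread struct in this package):
+//
+//	unreads, _, err := client.GetTeamsUnreadForUser(userId, "", true)
+//	if err == nil {
+//		for _, tu := range unreads {
+//			fmt.Println(tu.TeamId, tu.MsgCount, tu.MentionCount)
+//		}
+//	}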
+func (c *Client4) GetTeamsUnreadForUser(userId, teamIdToExclude string, includeCollapsedThreads bool) ([]*TeamUnread, *Response, error) {
+	query := url.Values{}
+
+	if teamIdToExclude != "" {
+		query.Set("exclude_team", teamIdToExclude)
+	}
+
+	if includeCollapsedThreads {
+		query.Set("include_collapsed_threads", "true")
+	}
+
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/teams/unread?"+query.Encode(), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list []*TeamUnread
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamsUnreadForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUserAudits returns a list of audits based on the provided user id string.
+func (c *Client4) GetUserAudits(userId string, page int, perPage int, etag string) (Audits, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/audits"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var audits Audits
+	err = json.NewDecoder(r.Body).Decode(&audits)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetUserAudits", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return audits, BuildResponse(r), nil
+}
+
+// VerifyUserEmail will verify a user's email using the supplied token.
+func (c *Client4) VerifyUserEmail(token string) (*Response, error) {
+	requestBody := map[string]string{"token": token}
+	r, err := c.DoAPIPost(c.usersRoute()+"/email/verify", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// VerifyUserEmailWithoutToken will verify a user's email by user Id. (Requires manage system role)
+func (c *Client4) VerifyUserEmailWithoutToken(userId string) (*User, *Response, error) {
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/email/verify/member", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var u User
+	if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil {
+		return nil, nil, NewAppError("VerifyUserEmailWithoutToken", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &u, BuildResponse(r), nil
+}
+
+// SendVerificationEmail will send an email to the user with the provided email address, if
+// that user exists. The email will contain a link that can be used to verify the user's
+// email address.
+func (c *Client4) SendVerificationEmail(email string) (*Response, error) {
+	requestBody := map[string]string{"email": email}
+	r, err := c.DoAPIPost(c.usersRoute()+"/email/verify/send", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// SetDefaultProfileImage resets the profile image to a default generated one.
+func (c *Client4) SetDefaultProfileImage(userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userRoute(userId) + "/image")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// SetProfileImage sets the profile image of the user.
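+//
+// Example (illustrative sketch; reads an image from disk, the path is a
+// placeholder):
+//
+//	data, err := ioutil.ReadFile("/path/to/profile.png")
+//	if err == nil {
+//		_, err = client.SetProfileImage(userId, data)
+//	}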
+func (c *Client4) SetProfileImage(userId string, data []byte) (*Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("image", "profile.png")
+	if err != nil {
+		return nil, NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+		return nil, NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if err = writer.Close(); err != nil {
+		return nil, NewAppError("SetProfileImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	rq, err := http.NewRequest("POST", c.APIURL+c.userRoute(userId)+"/image", bytes.NewReader(body.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+	rq.Header.Set("Content-Type", writer.FormDataContentType())
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	return BuildResponse(rp), nil
+}
+
+// CreateUserAccessToken will generate a user access token that can be used in place
+// of a session token to access the REST API. Must have the 'create_user_access_token'
+// permission and if generating for another user, must have the 'edit_other_users'
+// permission. A non-blank description is required.
+func (c *Client4) CreateUserAccessToken(userId, description string) (*UserAccessToken, *Response, error) {
+	requestBody := map[string]string{"description": description}
+	r, err := c.DoAPIPost(c.userRoute(userId)+"/tokens", MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var uat UserAccessToken
+	if jsonErr := json.NewDecoder(r.Body).Decode(&uat); jsonErr != nil {
+		return nil, nil, NewAppError("CreateUserAccessToken", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &uat, BuildResponse(r), nil
+}
+
+// GetUserAccessTokens will get a page of access tokens' id, description, is_active
+// and the user_id in the system. The actual token will not be returned. Must have
+// the 'manage_system' permission.
+func (c *Client4) GetUserAccessTokens(page int, perPage int) ([]*UserAccessToken, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.userAccessTokensRoute()+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*UserAccessToken
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserAccessTokens", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetUserAccessToken will get a user access token's id, description, is_active
+// and the user_id of the user it is for. The actual token will not be returned.
+// Must have the 'read_user_access_token' permission and if getting for another
+// user, must have the 'edit_other_users' permission.
+func (c *Client4) GetUserAccessToken(tokenId string) (*UserAccessToken, *Response, error) {
+	r, err := c.DoAPIGet(c.userAccessTokenRoute(tokenId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var uat UserAccessToken
+	if jsonErr := json.NewDecoder(r.Body).Decode(&uat); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserAccessToken", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &uat, BuildResponse(r), nil
+}
+
+// GetUserAccessTokensForUser will get a paged list of user access tokens showing id,
+// description and user_id for each. The actual tokens will not be returned. Must have
+// the 'read_user_access_token' permission and if getting for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) GetUserAccessTokensForUser(userId string, page, perPage int) ([]*UserAccessToken, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/tokens"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*UserAccessToken
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserAccessTokensForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// RevokeUserAccessToken will revoke a user access token by id. Must have the
+// 'revoke_user_access_token' permission and if revoking for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) RevokeUserAccessToken(tokenId string) (*Response, error) {
+	requestBody := map[string]string{"token_id": tokenId}
+	r, err := c.DoAPIPost(c.usersRoute()+"/tokens/revoke", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// SearchUserAccessTokens returns user access tokens matching the provided search term.
+func (c *Client4) SearchUserAccessTokens(search *UserAccessTokenSearch) ([]*UserAccessToken, *Response, error) {
+	buf, err := json.Marshal(search)
+	if err != nil {
+		return nil, nil, NewAppError("SearchUserAccessTokens", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.usersRoute()+"/tokens/search", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*UserAccessToken
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchUserAccessTokens", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// DisableUserAccessToken will disable a user access token by id. Must have the
+// 'revoke_user_access_token' permission and if disabling for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) DisableUserAccessToken(tokenId string) (*Response, error) {
+	requestBody := map[string]string{"token_id": tokenId}
+	r, err := c.DoAPIPost(c.usersRoute()+"/tokens/disable", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// EnableUserAccessToken will enable a user access token by id. Must have the
+// 'create_user_access_token' permission and if enabling for another user, must have the
+// 'edit_other_users' permission.
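+//
+// Example (illustrative sketch; tokenId identifies an existing, disabled token):
+//
+//	_, err := client.EnableUserAccessToken(tokenId)
+//	if err != nil {
+//		// handle the error
+//	}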
+func (c *Client4) EnableUserAccessToken(tokenId string) (*Response, error) {
+	requestBody := map[string]string{"token_id": tokenId}
+	r, err := c.DoAPIPost(c.usersRoute()+"/tokens/enable", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// Bots section
+
+// CreateBot creates a bot in the system based on the provided bot struct.
+func (c *Client4) CreateBot(bot *Bot) (*Bot, *Response, error) {
+	buf, err := json.Marshal(bot)
+	if err != nil {
+		return nil, nil, NewAppError("CreateBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.botsRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var resp *Bot
+	err = json.NewDecoder(r.Body).Decode(&resp)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("CreateBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return resp, BuildResponse(r), nil
+}
+
+// PatchBot partially updates a bot. Any missing fields are not updated.
+func (c *Client4) PatchBot(userId string, patch *BotPatch) (*Bot, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.botRoute(userId), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("PatchBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// GetBot fetches the given, undeleted bot.
+func (c *Client4) GetBot(userId string, etag string) (*Bot, *Response, error) {
+	r, err := c.DoAPIGet(c.botRoute(userId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// GetBotIncludeDeleted fetches the given bot, even if it is deleted.
+func (c *Client4) GetBotIncludeDeleted(userId string, etag string) (*Bot, *Response, error) {
+	r, err := c.DoAPIGet(c.botRoute(userId)+"?include_deleted="+c.boolString(true), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBotIncludeDeleted", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// GetBots fetches the given page of bots, excluding deleted.
+func (c *Client4) GetBots(page, perPage int, etag string) ([]*Bot, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.botsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bots BotList
+	err = json.NewDecoder(r.Body).Decode(&bots)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBots", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return bots, BuildResponse(r), nil
+}
+
+// GetBotsIncludeDeleted fetches the given page of bots, including deleted.
+func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted="+c.boolString(true), page, perPage)
+	r, err := c.DoAPIGet(c.botsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bots BotList
+	err = json.NewDecoder(r.Body).Decode(&bots)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBotsIncludeDeleted", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return bots, BuildResponse(r), nil
+}
+
+// GetBotsOrphaned fetches the given page of bots, only including orphaned bots.
+func (c *Client4) GetBotsOrphaned(page, perPage int, etag string) ([]*Bot, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned="+c.boolString(true), page, perPage)
+	r, err := c.DoAPIGet(c.botsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bots BotList
+	err = json.NewDecoder(r.Body).Decode(&bots)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBotsOrphaned", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return bots, BuildResponse(r), nil
+}
+
+// DisableBot disables the given bot in the system.
+func (c *Client4) DisableBot(botUserId string) (*Bot, *Response, error) {
+	r, err := c.DoAPIPostBytes(c.botRoute(botUserId)+"/disable", nil)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("DisableBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// EnableBot enables the given bot in the system.
+func (c *Client4) EnableBot(botUserId string) (*Bot, *Response, error) {
+	r, err := c.DoAPIPostBytes(c.botRoute(botUserId)+"/enable", nil)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("EnableBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// AssignBot assigns the given bot to the given user
+func (c *Client4) AssignBot(botUserId, newOwnerId string) (*Bot, *Response, error) {
+	r, err := c.DoAPIPostBytes(c.botRoute(botUserId)+"/assign/"+newOwnerId, nil)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var bot *Bot
+	err = json.NewDecoder(r.Body).Decode(&bot)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("AssignBot", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return bot, BuildResponse(r), nil
+}
+
+// Team Section
+
+// CreateTeam creates a team in the system based on the provided team struct.
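+//
+// Example (illustrative sketch; TeamOpen is the public team type defined in
+// this package):
+//
+//	team, _, err := client.CreateTeam(&Team{
+//		Name:        "my-team",
+//		DisplayName: "My Team",
+//		Type:        TeamOpen,
+//	})
+//	if err != nil {
+//		// handle the error
+//	}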
+func (c *Client4) CreateTeam(team *Team) (*Team, *Response, error) {
+	buf, err := json.Marshal(team)
+	if err != nil {
+		return nil, nil, NewAppError("CreateTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamsRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("CreateTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// GetTeam returns a team based on the provided team id string.
+func (c *Client4) GetTeam(teamId, etag string) (*Team, *Response, error) {
+	r, err := c.DoAPIGet(c.teamRoute(teamId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// GetAllTeams returns all teams based on permissions.
+func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.teamsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetAllTeams", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetAllTeamsWithTotalCount returns all teams based on permissions.
+func (c *Client4) GetAllTeamsWithTotalCount(etag string, page int, perPage int) ([]*Team, int64, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
+	r, err := c.DoAPIGet(c.teamsRoute()+query, etag)
+	if err != nil {
+		return nil, 0, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var listWithCount TeamsWithCount
+	if jsonErr := json.NewDecoder(r.Body).Decode(&listWithCount); jsonErr != nil {
+		return nil, 0, nil, NewAppError("GetAllTeamsWithTotalCount", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return listWithCount.Teams, listWithCount.TotalCount, BuildResponse(r), nil
+}
+
+// GetAllTeamsExcludePolicyConstrained returns all teams which are not part of a data retention policy.
+// Must be a system administrator.
+func (c *Client4) GetAllTeamsExcludePolicyConstrained(etag string, page int, perPage int) ([]*Team, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&exclude_policy_constrained=%v", page, perPage, true)
+	r, err := c.DoAPIGet(c.teamsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetAllTeamsExcludePolicyConstrained", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetTeamByName returns a team based on the provided team name string.
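+//
+// Example (illustrative sketch; the empty string disables etag caching):
+//
+//	team, _, err := client.GetTeamByName("my-team", "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(team.Id)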
+func (c *Client4) GetTeamByName(name, etag string) (*Team, *Response, error) {
+	r, err := c.DoAPIGet(c.teamByNameRoute(name), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamByName", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// SearchTeams returns teams matching the provided search term.
+func (c *Client4) SearchTeams(search *TeamSearch) ([]*Team, *Response, error) {
+	buf, err := json.Marshal(search)
+	if err != nil {
+		return nil, nil, NewAppError("SearchTeams", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamsRoute()+"/search", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchTeams", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// SearchTeamsPaged returns a page of teams and the total count matching the provided search term.
+func (c *Client4) SearchTeamsPaged(search *TeamSearch) ([]*Team, int64, *Response, error) {
+	if search.Page == nil {
+		search.Page = NewInt(0)
+	}
+	if search.PerPage == nil {
+		search.PerPage = NewInt(100)
+	}
+	buf, err := json.Marshal(search)
+	if err != nil {
+		return nil, 0, BuildResponse(nil), NewAppError("SearchTeamsPaged", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamsRoute()+"/search", buf)
+	if err != nil {
+		return nil, 0, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var listWithCount TeamsWithCount
+	if jsonErr := json.NewDecoder(r.Body).Decode(&listWithCount); jsonErr != nil {
+		return nil, 0, nil, NewAppError("SearchTeamsPaged", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return listWithCount.Teams, listWithCount.TotalCount, BuildResponse(r), nil
+}
+
+// TeamExists returns true if the team exists, false otherwise.
+func (c *Client4) TeamExists(name, etag string) (bool, *Response, error) {
+	r, err := c.DoAPIGet(c.teamByNameRoute(name)+"/exists", etag)
+	if err != nil {
+		return false, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapBoolFromJSON(r.Body)["exists"], BuildResponse(r), nil
+}
+
+// GetTeamsForUser returns a list of teams a user is on. Must be logged in as the user
+// or be a system administrator.
+func (c *Client4) GetTeamsForUser(userId, etag string) ([]*Team, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/teams", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamsForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetTeamMember returns a team member based on the provided team and user id strings.
+func (c *Client4) GetTeamMember(teamId, userId, etag string) (*TeamMember, *Response, error) {
+	r, err := c.DoAPIGet(c.teamMemberRoute(teamId, userId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tm TeamMember
+	if r.StatusCode == http.StatusNotModified {
+		return &tm, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tm); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamMember", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &tm, BuildResponse(r), nil
+}
+
+// UpdateTeamMemberRoles will update the roles on a team for a user.
+func (c *Client4) UpdateTeamMemberRoles(teamId, userId, newRoles string) (*Response, error) {
+	requestBody := map[string]string{"roles": newRoles}
+	r, err := c.DoAPIPut(c.teamMemberRoute(teamId, userId)+"/roles", MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateTeamMemberSchemeRoles will update the scheme-derived roles on a team for a user.
+func (c *Client4) UpdateTeamMemberSchemeRoles(teamId string, userId string, schemeRoles *SchemeRoles) (*Response, error) {
+	buf, err := json.Marshal(schemeRoles)
+	if err != nil {
+		return nil, NewAppError("UpdateTeamMemberSchemeRoles", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.teamMemberRoute(teamId, userId)+"/schemeRoles", buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateTeam will update a team.
+func (c *Client4) UpdateTeam(team *Team) (*Team, *Response, error) {
+	buf, err := json.Marshal(team)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.teamRoute(team.Id), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// PatchTeam partially updates a team. Any missing fields are not updated.
+func (c *Client4) PatchTeam(teamId string, patch *TeamPatch) (*Team, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.teamRoute(teamId)+"/patch", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("PatchTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// RestoreTeam restores a previously deleted team.
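+//
+// Example (illustrative sketch; teamId refers to a soft-deleted team):
+//
+//	team, _, err := client.RestoreTeam(teamId)
+//	if err != nil {
+//		// handle the error
+//	}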
+func (c *Client4) RestoreTeam(teamId string) (*Team, *Response, error) {
+	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/restore", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("RestoreTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// RegenerateTeamInviteId requests a new invite ID to be generated.
+func (c *Client4) RegenerateTeamInviteId(teamId string) (*Team, *Response, error) {
+	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/regenerate_invite_id", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("RegenerateTeamInviteId", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// SoftDeleteTeam deletes the team softly (archive only, not permanent delete).
+func (c *Client4) SoftDeleteTeam(teamId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.teamRoute(teamId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PermanentDeleteTeam deletes the team, should only be used when needed for
+// compliance and the like.
+func (c *Client4) PermanentDeleteTeam(teamId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.teamRoute(teamId) + "?permanent=" + c.boolString(true))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UpdateTeamPrivacy modifies the team type (model.TeamOpen <--> model.TeamInvite) and sets
+// the corresponding AllowOpenInvite appropriately.
+func (c *Client4) UpdateTeamPrivacy(teamId string, privacy string) (*Team, *Response, error) {
+	requestBody := map[string]string{"privacy": privacy}
+	r, err := c.DoAPIPut(c.teamRoute(teamId)+"/privacy", MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateTeamPrivacy", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// GetTeamMembers returns team members based on the provided team id string.
+func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag string) ([]*TeamMember, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.teamMembersRoute(teamId)+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMember
+	if r.StatusCode == http.StatusNotModified {
+		return tms, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// GetTeamMembersSortAndWithoutDeletedUsers returns team members based on the provided team id string.
+// The additional parameters sort and exclude_deleted_users are accepted as well.
+// These could not be added to GetTeamMembers above because that would be a breaking change.
+func (c *Client4) GetTeamMembersSortAndWithoutDeletedUsers(teamId string, page int, perPage int, sort string, excludeDeletedUsers bool, etag string) ([]*TeamMember, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v&exclude_deleted_users=%v", page, perPage, sort, excludeDeletedUsers)
+	r, err := c.DoAPIGet(c.teamMembersRoute(teamId)+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMember
+	if r.StatusCode == http.StatusNotModified {
+		return tms, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamMembersSortAndWithoutDeletedUsers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// GetTeamMembersForUser returns the team members for a user.
+func (c *Client4) GetTeamMembersForUser(userId string, etag string) ([]*TeamMember, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/teams/members", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMember
+	if r.StatusCode == http.StatusNotModified {
+		return tms, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamMembersForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// GetTeamMembersByIds will return an array of team members based on the
+// team id and a list of user ids provided. Must be authenticated.
+func (c *Client4) GetTeamMembersByIds(teamId string, userIds []string) ([]*TeamMember, *Response, error) {
+	r, err := c.DoAPIPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJSON(userIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMember
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamMembersByIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// AddTeamMember adds a user to a team and returns a team member.
+func (c *Client4) AddTeamMember(teamId, userId string) (*TeamMember, *Response, error) {
+	member := &TeamMember{TeamId: teamId, UserId: userId}
+	buf, err := json.Marshal(member)
+	if err != nil {
+		return nil, nil, NewAppError("AddTeamMember", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamMembersRoute(teamId), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tm TeamMember
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tm); jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMember", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &tm, BuildResponse(r), nil
+}
+
+// AddTeamMemberFromInvite adds a user to a team and returns a team member using an invite id
+// or an invite token/data pair.
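+//
+// Usage sketch (editor's illustration; inviteID is an assumed placeholder, and
+// the token argument is left empty when joining via an invite id):
+//
+//	tm, _, err := c.AddTeamMemberFromInvite("", inviteID)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("joined team", tm.TeamId)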
+func (c *Client4) AddTeamMemberFromInvite(token, inviteId string) (*TeamMember, *Response, error) {
+	// Build the query with url.Values so invite_id and token combine into a
+	// single well-formed query string when both are provided.
+	values := url.Values{}
+
+	if inviteId != "" {
+		values.Set("invite_id", inviteId)
+	}
+
+	if token != "" {
+		values.Set("token", token)
+	}
+
+	var query string
+	if len(values) > 0 {
+		query = "?" + values.Encode()
+	}
+
+	r, err := c.DoAPIPost(c.teamsRoute()+"/members/invite"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tm TeamMember
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tm); jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMemberFromInvite", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &tm, BuildResponse(r), nil
+}
+
+// AddTeamMembers adds a number of users to a team and returns the team members.
+func (c *Client4) AddTeamMembers(teamId string, userIds []string) ([]*TeamMember, *Response, error) {
+	var members []*TeamMember
+	for _, userId := range userIds {
+		member := &TeamMember{TeamId: teamId, UserId: userId}
+		members = append(members, member)
+	}
+	js, jsonErr := json.Marshal(members)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMembers", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.teamMembersRoute(teamId)+"/batch", string(js))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMember
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// AddTeamMembersGracefully adds a number of users to a team and returns the team members,
+// reporting any per-user failures instead of failing the whole request.
+func (c *Client4) AddTeamMembersGracefully(teamId string, userIds []string) ([]*TeamMemberWithError, *Response, error) {
+	var members []*TeamMember
+	for _, userId := range userIds {
+		member := &TeamMember{TeamId: teamId, UserId: userId}
+		members = append(members, member)
+	}
+	js, jsonErr := json.Marshal(members)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMembersGracefully", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+
+	r, err := c.DoAPIPost(c.teamMembersRoute(teamId)+"/batch?graceful="+c.boolString(true), string(js))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tms []*TeamMemberWithError
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tms); jsonErr != nil {
+		return nil, nil, NewAppError("AddTeamMembersGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return tms, BuildResponse(r), nil
+}
+
+// RemoveTeamMember will remove a user from a team.
+func (c *Client4) RemoveTeamMember(teamId, userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.teamMemberRoute(teamId, userId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetTeamStats returns team stats based on the team id string.
+// Must be authenticated.
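+//
+// Usage sketch (editor's illustration; teamID is an assumed placeholder, and
+// TotalMemberCount/ActiveMemberCount are assumed TeamStats fields):
+//
+//	stats, _, err := c.GetTeamStats(teamID, "")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("members: %d, active: %d\n", stats.TotalMemberCount, stats.ActiveMemberCount)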
+func (c *Client4) GetTeamStats(teamId, etag string) (*TeamStats, *Response, error) {
+	r, err := c.DoAPIGet(c.teamStatsRoute(teamId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ts TeamStats
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ts); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamStats", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ts, BuildResponse(r), nil
+}
+
+// GetTotalUsersStats returns the total system user stats.
+// Must be authenticated.
+func (c *Client4) GetTotalUsersStats(etag string) (*UsersStats, *Response, error) {
+	r, err := c.DoAPIGet(c.totalUsersStatsRoute(), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var stats UsersStats
+	if jsonErr := json.NewDecoder(r.Body).Decode(&stats); jsonErr != nil {
+		return nil, nil, NewAppError("GetTotalUsersStats", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &stats, BuildResponse(r), nil
+}
+
+// GetTeamUnread will return a TeamUnread object that contains the amount of
+// unread messages and mentions the user has for the specified team.
+// Must be authenticated.
+func (c *Client4) GetTeamUnread(teamId, userId string) (*TeamUnread, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userId)+c.teamRoute(teamId)+"/unread", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var tu TeamUnread
+	if jsonErr := json.NewDecoder(r.Body).Decode(&tu); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamUnread", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &tu, BuildResponse(r), nil
+}
+
+// ImportTeam will import an exported team from another app into an existing team.
+func (c *Client4) ImportTeam(data []byte, filesize int, importFrom, filename, teamId string) (map[string]string, *Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("file", filename)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+		return nil, nil, err
+	}
+
+	part, err = writer.CreateFormField("filesize")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err = io.Copy(part, strings.NewReader(strconv.Itoa(filesize))); err != nil {
+		return nil, nil, err
+	}
+
+	part, err = writer.CreateFormField("importFrom")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err := io.Copy(part, strings.NewReader(importFrom)); err != nil {
+		return nil, nil, err
+	}
+
+	if err := writer.Close(); err != nil {
+		return nil, nil, err
+	}
+
+	return c.DoUploadImportTeam(c.teamImportRoute(teamId), body.Bytes(), writer.FormDataContentType())
+}
+
+// InviteUsersToTeam invites users by email to the team.
+func (c *Client4) InviteUsersToTeam(teamId string, userEmails []string) (*Response, error) {
+	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/invite/email", ArrayToJSON(userEmails))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// InviteGuestsToTeam invites guests by email to some channels in a team.
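+//
+// Usage sketch (editor's illustration; the IDs and address are assumed
+// placeholders):
+//
+//	_, err := c.InviteGuestsToTeam(teamID,
+//		[]string{"guest@example.com"},
+//		[]string{channelID},
+//		"Welcome aboard!")
+//	if err != nil {
+//		return err
+//	}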
+func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channels []string, message string) (*Response, error) {
+	guestsInvite := GuestsInvite{
+		Emails:   userEmails,
+		Channels: channels,
+		Message:  message,
+	}
+	buf, err := json.Marshal(guestsInvite)
+	if err != nil {
+		return nil, NewAppError("InviteGuestsToTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamRoute(teamId)+"/invite-guests/email", buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// InviteUsersToTeamGracefully invites users by email to the team, reporting
+// any per-email failures instead of failing the whole request.
+func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response, error) {
+	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJSON(userEmails))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*EmailInviteWithError
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("InviteUsersToTeamGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// InviteUsersToTeamAndChannelsGracefully invites users by email to the team
+// and to the given channels, reporting any per-email failures instead of
+// failing the whole request.
+func (c *Client4) InviteUsersToTeamAndChannelsGracefully(teamId string, userEmails []string, channelIds []string, message string) ([]*EmailInviteWithError, *Response, error) {
+	memberInvite := MemberInvite{
+		Emails:     userEmails,
+		ChannelIds: channelIds,
+		Message:    message,
+	}
+	buf, err := json.Marshal(memberInvite)
+	if err != nil {
+		return nil, nil, NewAppError("InviteUsersToTeamAndChannelsGracefully", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*EmailInviteWithError
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("InviteUsersToTeamAndChannelsGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// InviteGuestsToTeamGracefully invites guests by email to some channels in a team,
+// reporting any per-email failures instead of failing the whole request.
+func (c *Client4) InviteGuestsToTeamGracefully(teamId string, userEmails []string, channels []string, message string) ([]*EmailInviteWithError, *Response, error) {
+	guestsInvite := GuestsInvite{
+		Emails:   userEmails,
+		Channels: channels,
+		Message:  message,
+	}
+	buf, err := json.Marshal(guestsInvite)
+	if err != nil {
+		return nil, nil, NewAppError("InviteGuestsToTeamGracefully", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.teamRoute(teamId)+"/invite-guests/email?graceful="+c.boolString(true), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*EmailInviteWithError
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("InviteGuestsToTeamGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// InvalidateEmailInvites will invalidate active email invitations that have not been accepted by the user.
+func (c *Client4) InvalidateEmailInvites() (*Response, error) {
+	r, err := c.DoAPIDelete(c.teamsRoute() + "/invites/email")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetTeamInviteInfo returns a team object from an invite id containing sanitized information.
+func (c *Client4) GetTeamInviteInfo(inviteId string) (*Team, *Response, error) {
+	r, err := c.DoAPIGet(c.teamsRoute()+"/invite/"+inviteId, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var t Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&t); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamInviteInfo", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &t, BuildResponse(r), nil
+}
+
+// SetTeamIcon sets the icon of the team.
+func (c *Client4) SetTeamIcon(teamId string, data []byte) (*Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("image", "teamIcon.png")
+	if err != nil {
+		return nil, NewAppError("SetTeamIcon", "model.client.set_team_icon.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+		return nil, NewAppError("SetTeamIcon", "model.client.set_team_icon.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if err = writer.Close(); err != nil {
+		return nil, NewAppError("SetTeamIcon", "model.client.set_team_icon.writer.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	rq, err := http.NewRequest("POST", c.APIURL+c.teamRoute(teamId)+"/image", bytes.NewReader(body.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+	rq.Header.Set("Content-Type", writer.FormDataContentType())
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	return BuildResponse(rp), nil
+}
+
+// GetTeamIcon gets the team icon of the team.
+func (c *Client4) GetTeamIcon(teamId, etag string) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.teamRoute(teamId)+"/image", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetTeamIcon", "model.client.get_team_icon.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// RemoveTeamIcon updates LastTeamIconUpdate to 0, which indicates that the team icon was removed.
+func (c *Client4) RemoveTeamIcon(teamId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.teamRoute(teamId) + "/image")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// Channel Section
+
+// GetAllChannels gets all the channels. Must be a system administrator.
+func (c *Client4) GetAllChannels(page int, perPage int, etag string) (ChannelListWithTeamData, *Response, error) {
+	return c.getAllChannels(page, perPage, etag, ChannelSearchOpts{})
+}
+
+// GetAllChannelsIncludeDeleted gets all the channels, including deleted ones. Must be a system administrator.
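+//
+// Usage sketch (editor's illustration): paging through every channel, 200 at
+// a time, until an empty page comes back.
+//
+//	for page := 0; ; page++ {
+//		channels, _, err := c.GetAllChannelsIncludeDeleted(page, 200, "")
+//		if err != nil {
+//			return err
+//		}
+//		if len(channels) == 0 {
+//			break
+//		}
+//		// process channels ...
+//	}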
+func (c *Client4) GetAllChannelsIncludeDeleted(page int, perPage int, etag string) (ChannelListWithTeamData, *Response, error) {
+	return c.getAllChannels(page, perPage, etag, ChannelSearchOpts{IncludeDeleted: true})
+}
+
+// GetAllChannelsExcludePolicyConstrained gets all channels which are not part of a data retention policy.
+// Must be a system administrator.
+func (c *Client4) GetAllChannelsExcludePolicyConstrained(page, perPage int, etag string) (ChannelListWithTeamData, *Response, error) {
+	return c.getAllChannels(page, perPage, etag, ChannelSearchOpts{ExcludePolicyConstrained: true})
+}
+
+func (c *Client4) getAllChannels(page int, perPage int, etag string, opts ChannelSearchOpts) (ChannelListWithTeamData, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=%v&exclude_policy_constrained=%v",
+		page, perPage, opts.IncludeDeleted, opts.ExcludePolicyConstrained)
+	r, err := c.DoAPIGet(c.channelsRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelListWithTeamData
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("getAllChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetAllChannelsWithCount gets all the channels along with the total count. Must be a system administrator.
+func (c *Client4) GetAllChannelsWithCount(page int, perPage int, etag string) (ChannelListWithTeamData, int64, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
+	r, err := c.DoAPIGet(c.channelsRoute()+query, etag)
+	if err != nil {
+		return nil, 0, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var cwc *ChannelsWithCount
+	err = json.NewDecoder(r.Body).Decode(&cwc)
+	if err != nil {
+		return nil, 0, BuildResponse(r), NewAppError("GetAllChannelsWithCount", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return cwc.Channels, cwc.TotalCount, BuildResponse(r), nil
+}
+
+// CreateChannel creates a channel based on the provided channel struct.
+func (c *Client4) CreateChannel(channel *Channel) (*Channel, *Response, error) {
+	channelJSON, jsonErr := json.Marshal(channel)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("CreateChannel", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsRoute(), string(channelJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("CreateChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// UpdateChannel updates a channel based on the provided channel struct.
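+//
+// Usage sketch (editor's illustration): since the full struct is sent, the
+// usual pattern is read-modify-write. channelID is an assumed placeholder and
+// DisplayName is assumed to be a Channel field.
+//
+//	channel, _, err := c.GetChannel(channelID, "")
+//	if err != nil {
+//		return err
+//	}
+//	channel.DisplayName = "Renamed Channel"
+//	channel, _, err = c.UpdateChannel(channel)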
+func (c *Client4) UpdateChannel(channel *Channel) (*Channel, *Response, error) {
+	channelJSON, jsonErr := json.Marshal(channel)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("UpdateChannel", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPut(c.channelRoute(channel.Id), string(channelJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("UpdateChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// PatchChannel partially updates a channel. Any missing fields are not updated.
+func (c *Client4) PatchChannel(channelId string, patch *ChannelPatch) (*Channel, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.channelRoute(channelId)+"/patch", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("PatchChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// UpdateChannelPrivacy updates channel privacy.
+func (c *Client4) UpdateChannelPrivacy(channelId string, privacy ChannelType) (*Channel, *Response, error) {
+	requestBody := map[string]string{"privacy": string(privacy)}
+	r, err := c.DoAPIPut(c.channelRoute(channelId)+"/privacy", MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("UpdateChannelPrivacy", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// RestoreChannel restores a previously deleted channel.
+func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response, error) {
+	r, err := c.DoAPIPost(c.channelRoute(channelId)+"/restore", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("RestoreChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// CreateDirectChannel creates a direct message channel based on the two user
+// ids provided.
+func (c *Client4) CreateDirectChannel(userId1, userId2 string) (*Channel, *Response, error) {
+	requestBody := []string{userId1, userId2}
+	r, err := c.DoAPIPost(c.channelsRoute()+"/direct", ArrayToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("CreateDirectChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// CreateGroupChannel creates a group message channel based on userIds provided.
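+//
+// Usage sketch (editor's illustration; the user IDs are assumed placeholders,
+// and a group channel is assumed to need at least three members including the
+// caller):
+//
+//	gc, _, err := c.CreateGroupChannel([]string{meID, aliceID, bobID})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("group channel id:", gc.Id)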
+func (c *Client4) CreateGroupChannel(userIds []string) (*Channel, *Response, error) { + r, err := c.DoAPIPost(c.channelsRoute()+"/group", ArrayToJSON(userIds)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *Channel + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("CreateGroupChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannel returns a channel based on the provided channel id string. +func (c *Client4) GetChannel(channelId, etag string) (*Channel, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelId), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *Channel + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelStats returns statistics for a channel. +func (c *Client4) GetChannelStats(channelId string, etag string) (*ChannelStats, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelId)+"/stats", etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var stats ChannelStats + if jsonErr := json.NewDecoder(r.Body).Decode(&stats); jsonErr != nil { + return nil, nil, NewAppError("GetChannelStats", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &stats, BuildResponse(r), nil +} + +// GetChannelMembersTimezones gets a list of timezones for a channel. +func (c *Client4) GetChannelMembersTimezones(channelId string) ([]string, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelId)+"/timezones", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + return ArrayFromJSON(r.Body), BuildResponse(r), nil +} + +// GetPinnedPosts gets a list of pinned posts. +func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelId)+"/pinned", etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var list PostList + if r.StatusCode == http.StatusNotModified { + return &list, BuildResponse(r), nil + } + + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetPinnedPosts", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &list, BuildResponse(r), nil +} + +// GetPrivateChannelsForTeam returns a list of private channels based on the provided team id string. +func (c *Client4) GetPrivateChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response, error) { + query := fmt.Sprintf("/private?page=%v&per_page=%v", page, perPage) + r, err := c.DoAPIGet(c.channelsForTeamRoute(teamId)+query, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch []*Channel + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetPrivateChannelsForTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetPublicChannelsForTeam returns a list of public channels based on the provided team id string. 
+func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.channelsForTeamRoute(teamId)+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetPublicChannelsForTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetDeletedChannelsForTeam returns a list of deleted channels based on the provided team id string.
+func (c *Client4) GetDeletedChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response, error) {
+	query := fmt.Sprintf("/deleted?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.channelsForTeamRoute(teamId)+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetDeletedChannelsForTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetPublicChannelsByIdsForTeam returns a list of public channels based on provided team id string.
+func (c *Client4) GetPublicChannelsByIdsForTeam(teamId string, channelIds []string) ([]*Channel, *Response, error) {
+	r, err := c.DoAPIPost(c.channelsForTeamRoute(teamId)+"/ids", ArrayToJSON(channelIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetPublicChannelsByIdsForTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelsForTeamForUser returns a list of channels on a team for a user.
+func (c *Client4) GetChannelsForTeamForUser(teamId, userId string, includeDeleted bool, etag string) ([]*Channel, *Response, error) {
+	r, err := c.DoAPIGet(c.channelsForTeamForUserRoute(teamId, userId, includeDeleted), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelsForTeamForUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelsForTeamAndUserWithLastDeleteAt returns a list of channels of a team for a user, additionally filtered by lastDeleteAt. The filter has no effect if includeDeleted is set to false.
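+//
+// Usage sketch (editor's illustration; teamID and userID are assumed
+// placeholders): fetch the user's channels on a team, including any archived
+// in the last 24 hours.
+//
+//	since := int(time.Now().Add(-24*time.Hour).UnixMilli())
+//	channels, _, err := c.GetChannelsForTeamAndUserWithLastDeleteAt(teamID, userID, true, since, "")
+//	if err != nil {
+//		return err
+//	}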
+func (c *Client4) GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId string, includeDeleted bool, lastDeleteAt int, etag string) ([]*Channel, *Response, error) {
+	route := c.userRoute(userId) + c.teamRoute(teamId) + "/channels"
+	route += fmt.Sprintf("?include_deleted=%v&last_delete_at=%d", includeDeleted, lastDeleteAt)
+	r, err := c.DoAPIGet(route, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelsForTeamAndUserWithLastDeleteAt", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelsForUserWithLastDeleteAt returns a list of channels for a user, additionally filtered by lastDeleteAt.
+func (c *Client4) GetChannelsForUserWithLastDeleteAt(userID string, lastDeleteAt int) ([]*Channel, *Response, error) {
+	route := c.userRoute(userID) + "/channels"
+	route += fmt.Sprintf("?last_delete_at=%d", lastDeleteAt)
+	r, err := c.DoAPIGet(route, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelsForUserWithLastDeleteAt", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// SearchChannels returns the channels on a team matching the provided search term.
+func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response, error) {
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchChannels", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsForTeamRoute(teamId)+"/search", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// SearchArchivedChannels returns the archived channels on a team matching the provided search term.
+func (c *Client4) SearchArchivedChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response, error) {
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchArchivedChannels", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsForTeamRoute(teamId)+"/search_archived", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchArchivedChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// SearchAllChannels searches all the channels. Must be a system administrator.
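+//
+// Usage sketch (editor's illustration; assumes the ChannelListWithTeamData
+// elements expose the embedded Channel fields):
+//
+//	results, _, err := c.SearchAllChannels(&ChannelSearch{Term: "town"})
+//	if err != nil {
+//		return err
+//	}
+//	for _, ch := range results {
+//		fmt.Println(ch.Name)
+//	}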
+func (c *Client4) SearchAllChannels(search *ChannelSearch) (ChannelListWithTeamData, *Response, error) {
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchAllChannels", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsRoute()+"/search", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelListWithTeamData
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchAllChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// SearchAllChannelsForUser searches all the channels as a regular user.
+func (c *Client4) SearchAllChannelsForUser(term string) (ChannelListWithTeamData, *Response, error) {
+	search := &ChannelSearch{
+		Term: term,
+	}
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchAllChannelsForUser", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsRoute()+"/search?system_console=false", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelListWithTeamData
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchAllChannelsForUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// SearchAllChannelsPaged searches all the channels and returns the results paged with the total count.
+func (c *Client4) SearchAllChannelsPaged(search *ChannelSearch) (*ChannelsWithCount, *Response, error) {
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchAllChannelsPaged", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsRoute()+"/search", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var cwc *ChannelsWithCount
+	err = json.NewDecoder(r.Body).Decode(&cwc)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchAllChannelsPaged", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return cwc, BuildResponse(r), nil
+}
+
+// SearchGroupChannels returns the group channels of the user whose members' usernames match the search term.
+func (c *Client4) SearchGroupChannels(search *ChannelSearch) ([]*Channel, *Response, error) {
+	searchJSON, jsonErr := json.Marshal(search)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchGroupChannels", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.channelsRoute()+"/group/search", string(searchJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch []*Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("SearchGroupChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// DeleteChannel deletes a channel based on the provided channel id string.
+func (c *Client4) DeleteChannel(channelId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.channelRoute(channelId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PermanentDeleteChannel deletes a channel based on the provided channel id string.
+func (c *Client4) PermanentDeleteChannel(channelId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.channelRoute(channelId) + "?permanent=" + c.boolString(true))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// MoveChannel moves the channel to the destination team.
+func (c *Client4) MoveChannel(channelId, teamId string, force bool) (*Channel, *Response, error) {
+	requestBody := map[string]interface{}{
+		"team_id": teamId,
+		"force":   force,
+	}
+	r, err := c.DoAPIPost(c.channelRoute(channelId)+"/move", StringInterfaceToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("MoveChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelByName returns a channel based on the provided channel name and team id strings.
+func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response, error) {
+	r, err := c.DoAPIGet(c.channelByNameRoute(channelName, teamId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelByName", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelByNameIncludeDeleted returns a channel based on the provided channel name and team id strings. Unlike GetChannelByName, it will also return deleted channels.
+func (c *Client4) GetChannelByNameIncludeDeleted(channelName, teamId string, etag string) (*Channel, *Response, error) {
+	r, err := c.DoAPIGet(c.channelByNameRoute(channelName, teamId)+"?include_deleted="+c.boolString(true), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelByNameIncludeDeleted", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelByNameForTeamName returns a channel based on the provided channel name and team name strings.
+func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag string) (*Channel, *Response, error) {
+	r, err := c.DoAPIGet(c.channelByNameForTeamNameRoute(channelName, teamName), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *Channel
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelByNameForTeamName", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// GetChannelByNameForTeamNameIncludeDeleted returns a channel based on the provided channel name and team name strings. Unlike GetChannelByNameForTeamName, it will also return deleted channels.
+func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamName string, etag string) (*Channel, *Response, error) { + r, err := c.DoAPIGet(c.channelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted="+c.boolString(true), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *Channel + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelByNameForTeamNameIncludeDeleted", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelMembers gets a page of channel members specific to a channel. +func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag string) (ChannelMembers, *Response, error) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + r, err := c.DoAPIGet(c.channelMembersRoute(channelId)+query, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch ChannelMembers + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMembers", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelMembersWithTeamData gets a page of all channel members for a user. +func (c *Client4) GetChannelMembersWithTeamData(userID string, page, perPage int) (ChannelMembersWithTeamData, *Response, error) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + r, err := c.DoAPIGet(c.userRoute(userID)+"/channel_members"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch ChannelMembersWithTeamData + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMembersWithTeamData", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelMembersByIds gets the channel members in a channel for a list of user ids. +func (c *Client4) GetChannelMembersByIds(channelId string, userIds []string) (ChannelMembers, *Response, error) { + r, err := c.DoAPIPost(c.channelMembersRoute(channelId)+"/ids", ArrayToJSON(userIds)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch ChannelMembers + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMembersByIds", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelMember gets a channel member. +func (c *Client4) GetChannelMember(channelId, userId, etag string) (*ChannelMember, *Response, error) { + r, err := c.DoAPIGet(c.channelMemberRoute(channelId, userId), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *ChannelMember + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMember", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelMembersForUser gets all the channel members for a user on a team. 
+func (c *Client4) GetChannelMembersForUser(userId, teamId, etag string) (ChannelMembers, *Response, error) { + r, err := c.DoAPIGet(fmt.Sprintf(c.userRoute(userId)+"/teams/%v/channels/members", teamId), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch ChannelMembers + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMembersForUser", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// ViewChannel performs a view action for a user. Synonymous with switching channels or marking channels as read by a user. +func (c *Client4) ViewChannel(userId string, view *ChannelView) (*ChannelViewResponse, *Response, error) { + url := fmt.Sprintf(c.channelsRoute()+"/members/%v/view", userId) + buf, err := json.Marshal(view) + if err != nil { + return nil, nil, NewAppError("ViewChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(url, buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *ChannelViewResponse + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("ViewChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// GetChannelUnread will return a ChannelUnread object that contains the number of +// unread messages and mentions for a user. +func (c *Client4) GetChannelUnread(channelId, userId string) (*ChannelUnread, *Response, error) { + r, err := c.DoAPIGet(c.userRoute(userId)+c.channelRoute(channelId)+"/unread", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch *ChannelUnread + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelUnread", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// UpdateChannelRoles will update the roles on a channel for a user. +func (c *Client4) UpdateChannelRoles(channelId, userId, roles string) (*Response, error) { + requestBody := map[string]string{"roles": roles} + r, err := c.DoAPIPut(c.channelMemberRoute(channelId, userId)+"/roles", MapToJSON(requestBody)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// UpdateChannelMemberSchemeRoles will update the scheme-derived roles on a channel for a user. +func (c *Client4) UpdateChannelMemberSchemeRoles(channelId string, userId string, schemeRoles *SchemeRoles) (*Response, error) { + buf, err := json.Marshal(schemeRoles) + if err != nil { + return nil, NewAppError("UpdateChannelMemberSchemeRoles", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.channelMemberRoute(channelId, userId)+"/schemeRoles", buf) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// UpdateChannelNotifyProps will update the notification properties on a channel for a user. 
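+//
+// Usage sketch (editor's illustration; the keys and values shown are
+// assumptions based on common Mattermost notify props, and the IDs are
+// placeholders):
+//
+//	props := map[string]string{
+//		"desktop":     "mention",
+//		"mark_unread": "mention",
+//	}
+//	_, err := c.UpdateChannelNotifyProps(channelID, userID, props)
+//	if err != nil {
+//		return err
+//	}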
+func (c *Client4) UpdateChannelNotifyProps(channelId, userId string, props map[string]string) (*Response, error) {
+	r, err := c.DoAPIPut(c.channelMemberRoute(channelId, userId)+"/notify_props", MapToJSON(props))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// AddChannelMember adds a user to a channel and returns a channel member.
+func (c *Client4) AddChannelMember(channelId, userId string) (*ChannelMember, *Response, error) {
+	requestBody := map[string]string{"user_id": userId}
+	r, err := c.DoAPIPost(c.channelMembersRoute(channelId), MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *ChannelMember
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("AddChannelMember", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// AddChannelMemberWithRootId adds a user to a channel and returns a channel member. The system post announcing the add is rooted at postRootId.
+func (c *Client4) AddChannelMemberWithRootId(channelId, userId, postRootId string) (*ChannelMember, *Response, error) {
+	requestBody := map[string]string{"user_id": userId, "post_root_id": postRootId}
+	r, err := c.DoAPIPost(c.channelMembersRoute(channelId), MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch *ChannelMember
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("AddChannelMemberWithRootId", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// RemoveUserFromChannel will delete the channel member object for a user, effectively removing the user from a channel.
+func (c *Client4) RemoveUserFromChannel(channelId, userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.channelMemberRoute(channelId, userId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// AutocompleteChannelsForTeam will return an ordered list of channel autocomplete suggestions.
+func (c *Client4) AutocompleteChannelsForTeam(teamId, name string) (ChannelList, *Response, error) {
+	query := fmt.Sprintf("?name=%v", name)
+	r, err := c.DoAPIGet(c.channelsForTeamRoute(teamId)+"/autocomplete"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelList
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("AutocompleteChannelsForTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// AutocompleteChannelsForTeamForSearch will return an ordered list of autocomplete suggestions from the user's channels.
+func (c *Client4) AutocompleteChannelsForTeamForSearch(teamId, name string) (ChannelList, *Response, error) {
+	query := fmt.Sprintf("?name=%v", name)
+	r, err := c.DoAPIGet(c.channelsForTeamRoute(teamId)+"/search_autocomplete"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelList
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("AutocompleteChannelsForTeamForSearch", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// Post Section
+
+// CreatePost creates a post based on the provided post struct.
+func (c *Client4) CreatePost(post *Post) (*Post, *Response, error) {
+	postJSON, jsonErr := json.Marshal(post)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("CreatePost", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.postsRoute(), string(postJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var p Post
+	if r.StatusCode == http.StatusNotModified {
+		return &p, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+		return nil, nil, NewAppError("CreatePost", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &p, BuildResponse(r), nil
+}
+
+// CreatePostEphemeral creates an ephemeral post based on the provided post struct, which is sent to the given user id.
+func (c *Client4) CreatePostEphemeral(post *PostEphemeral) (*Post, *Response, error) {
+	postJSON, jsonErr := json.Marshal(post)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("CreatePostEphemeral", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.postsEphemeralRoute(), string(postJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var p Post
+	if r.StatusCode == http.StatusNotModified {
+		return &p, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+		return nil, nil, NewAppError("CreatePostEphemeral", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &p, BuildResponse(r), nil
+}
+
+// UpdatePost updates a post based on the provided post struct.
+func (c *Client4) UpdatePost(postId string, post *Post) (*Post, *Response, error) {
+	postJSON, jsonErr := json.Marshal(post)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("UpdatePost", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPut(c.postRoute(postId), string(postJSON))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var p Post
+	if r.StatusCode == http.StatusNotModified {
+		return &p, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+		return nil, nil, NewAppError("UpdatePost", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &p, BuildResponse(r), nil
+}
+
+// PatchPost partially updates a post. Any missing fields are not updated.
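+//
+// Usage sketch (editor's illustration; assumes PostPatch carries pointer
+// fields so that omitted values stay untouched, and postID is a placeholder):
+//
+//	newMessage := "edited message text"
+//	post, _, err := c.PatchPost(postID, &PostPatch{Message: &newMessage})
+//	if err != nil {
+//		return err
+//	}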
+func (c *Client4) PatchPost(postId string, patch *PostPatch) (*Post, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchPost", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.postRoute(postId)+"/patch", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var p Post
+	if r.StatusCode == http.StatusNotModified {
+		return &p, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+		return nil, nil, NewAppError("PatchPost", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &p, BuildResponse(r), nil
+}
+
+// SetPostUnread marks the channel the post belongs to as unread from the time of the provided post.
+func (c *Client4) SetPostUnread(userId string, postId string, collapsedThreadsSupported bool) (*Response, error) {
+	b, err := json.Marshal(map[string]bool{"collapsed_threads_supported": collapsedThreadsSupported})
+	if err != nil {
+		return nil, NewAppError("SetPostUnread", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.userRoute(userId)+c.postRoute(postId)+"/set_unread", b)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PinPost pins a post based on the provided post id string.
+func (c *Client4) PinPost(postId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.postRoute(postId)+"/pin", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UnpinPost unpins a post based on the provided post id string.
+func (c *Client4) UnpinPost(postId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.postRoute(postId)+"/unpin", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetPost gets a single post.
+func (c *Client4) GetPost(postId string, etag string) (*Post, *Response, error) {
+	r, err := c.DoAPIGet(c.postRoute(postId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var post Post
+	if r.StatusCode == http.StatusNotModified {
+		return &post, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&post); jsonErr != nil {
+		return nil, nil, NewAppError("GetPost", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &post, BuildResponse(r), nil
+}
+
+// DeletePost deletes a post from the provided post id string.
+func (c *Client4) DeletePost(postId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.postRoute(postId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetPostThread gets a post with all the other posts in the same thread.
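+//
+// Usage sketch (editor's illustration; rootPostID is a placeholder, and the
+// PostList Order/Posts fields are assumed):
+//
+//	thread, _, err := c.GetPostThread(rootPostID, "", true)
+//	if err != nil {
+//		return err
+//	}
+//	for _, id := range thread.Order {
+//		fmt.Println(thread.Posts[id].Message)
+//	}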
+func (c *Client4) GetPostThread(postId string, etag string, collapsedThreads bool) (*PostList, *Response, error) {
+	url := c.postRoute(postId) + "/thread"
+	if collapsedThreads {
+		url += "?collapsedThreads=true"
+	}
+	r, err := c.DoAPIGet(url, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostThread", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetPostThreadWithOpts gets a post with all the other posts in the same thread.
+func (c *Client4) GetPostThreadWithOpts(postID string, etag string, opts GetPostsOptions) (*PostList, *Response, error) {
+	urlVal := c.postRoute(postID) + "/thread"
+
+	values := url.Values{}
+	if opts.CollapsedThreads {
+		values.Set("collapsedThreads", "true")
+	}
+	if opts.CollapsedThreadsExtended {
+		values.Set("collapsedThreadsExtended", "true")
+	}
+	if opts.SkipFetchThreads {
+		values.Set("skipFetchThreads", "true")
+	}
+	if opts.PerPage != 0 {
+		values.Set("perPage", strconv.Itoa(opts.PerPage))
+	}
+	if opts.FromPost != "" {
+		values.Set("fromPost", opts.FromPost)
+	}
+	if opts.FromCreateAt != 0 {
+		values.Set("fromCreateAt", strconv.FormatInt(opts.FromCreateAt, 10))
+	}
+	if opts.Direction != "" {
+		values.Set("direction", opts.Direction)
+	}
+	urlVal += "?" + values.Encode()
+
+	r, err := c.DoAPIGet(urlVal, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostThreadWithOpts", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetPostsForChannel gets a page of posts with an array for ordering for a channel.
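+//
+// Usage sketch (editor's illustration; channelID is a placeholder and the
+// page size of 60 is an assumed default):
+//
+//	for page := 0; ; page++ {
+//		list, _, err := c.GetPostsForChannel(channelID, page, 60, "", false)
+//		if err != nil || len(list.Order) == 0 {
+//			break
+//		}
+//		// process list.Posts ...
+//	}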
+func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	if collapsedThreads {
+		query += "&collapsedThreads=true"
+	}
+	r, err := c.DoAPIGet(c.channelRoute(channelId)+"/posts"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostsForChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetPostsByIds gets a list of posts by taking an array of post ids.
+func (c *Client4) GetPostsByIds(postIds []string) ([]*Post, *Response, error) {
+	js, jsonErr := json.Marshal(postIds)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("GetPostsByIds", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.postsRoute()+"/ids", string(js))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Post
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostsByIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetFlaggedPostsForUser returns flagged posts of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) (*PostList, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/posts/flagged"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetFlaggedPostsForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetFlaggedPostsForUserInTeam returns flagged posts in team of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, page int, perPage int) (*PostList, *Response, error) {
+	if !IsValidId(teamId) {
+		return nil, nil, NewAppError("GetFlaggedPostsForUserInTeam", "model.client.get_flagged_posts_in_team.missing_parameter.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	query := fmt.Sprintf("?team_id=%v&page=%v&per_page=%v", teamId, page, perPage)
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/posts/flagged"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetFlaggedPostsForUserInTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetFlaggedPostsForUserInChannel returns flagged posts in channel of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId string, page int, perPage int) (*PostList, *Response, error) { + if !IsValidId(channelId) { + return nil, nil, NewAppError("GetFlaggedPostsForUserInChannel", "model.client.get_flagged_posts_in_channel.missing_parameter.app_error", nil, "", http.StatusBadRequest) + } + + query := fmt.Sprintf("?channel_id=%v&page=%v&per_page=%v", channelId, page, perPage) + r, err := c.DoAPIGet(c.userRoute(userId)+"/posts/flagged"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list PostList + if r.StatusCode == http.StatusNotModified { + return &list, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetFlaggedPostsForUserInChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &list, BuildResponse(r), nil +} + +// GetPostsSince gets posts created after a specified time as Unix time in milliseconds. +func (c *Client4) GetPostsSince(channelId string, time int64, collapsedThreads bool) (*PostList, *Response, error) { + query := fmt.Sprintf("?since=%v", time) + if collapsedThreads { + query += "&collapsedThreads=true" + } + r, err := c.DoAPIGet(c.channelRoute(channelId)+"/posts"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list PostList + if r.StatusCode == http.StatusNotModified { + return &list, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetPostsSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &list, BuildResponse(r), nil +} + +// GetPostsAfter gets a page of posts that were posted after the post provided. +func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response, error) { + query := fmt.Sprintf("?page=%v&per_page=%v&after=%v", page, perPage, postId) + if collapsedThreads { + query += "&collapsedThreads=true" + } + r, err := c.DoAPIGet(c.channelRoute(channelId)+"/posts"+query, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list PostList + if r.StatusCode == http.StatusNotModified { + return &list, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetPostsAfter", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &list, BuildResponse(r), nil +} + +// GetPostsBefore gets a page of posts that were posted before the post provided. 
+func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&before=%v", page, perPage, postId)
+	if collapsedThreads {
+		query += "&collapsedThreads=true"
+	}
+	r, err := c.DoAPIGet(c.channelRoute(channelId)+"/posts"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostsBefore", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// GetPostsAroundLastUnread gets a list of posts around last unread post by a user in a channel.
+func (c *Client4) GetPostsAroundLastUnread(userId, channelId string, limitBefore, limitAfter int, collapsedThreads bool) (*PostList, *Response, error) {
+	query := fmt.Sprintf("?limit_before=%v&limit_after=%v", limitBefore, limitAfter)
+	if collapsedThreads {
+		query += "&collapsedThreads=true"
+	}
+	r, err := c.DoAPIGet(c.userRoute(userId)+c.channelRoute(channelId)+"/posts/unread"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPostsAroundLastUnread", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// SearchFiles returns any files with matching terms string.
+func (c *Client4) SearchFiles(teamId string, terms string, isOrSearch bool) (*FileInfoList, *Response, error) {
+	params := SearchParameter{
+		Terms:      &terms,
+		IsOrSearch: &isOrSearch,
+	}
+	return c.SearchFilesWithParams(teamId, &params)
+}
+
+// SearchFilesWithParams returns any files with matching terms string.
+func (c *Client4) SearchFilesWithParams(teamId string, params *SearchParameter) (*FileInfoList, *Response, error) {
+	js, jsonErr := json.Marshal(params)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchFilesWithParams", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/files/search", string(js))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list FileInfoList
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchFilesWithParams", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// SearchPosts returns any posts with matching terms string.
+func (c *Client4) SearchPosts(teamId string, terms string, isOrSearch bool) (*PostList, *Response, error) {
+	params := SearchParameter{
+		Terms:      &terms,
+		IsOrSearch: &isOrSearch,
+	}
+	return c.SearchPostsWithParams(teamId, &params)
+}
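Editor's usage sketch (not part of the diff): the search helpers above take plain terms and wrap them in a SearchParameter; a minimal example, assuming a configured client as in the earlier sketch and an illustrative team ID.

    func exampleSearchPosts(client *model.Client4) error {
        // Illustrative team ID and search terms.
        results, _, err := client.SearchPosts("teamidhere", "deploy failure", false)
        if err != nil {
            return err
        }
        for _, id := range results.Order {
            fmt.Println(results.Posts[id].Message)
        }
        return nil
    }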
+// SearchPostsWithParams returns any posts with matching terms string.
+func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter) (*PostList, *Response, error) {
+	js, jsonErr := json.Marshal(params)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("SearchPostsWithParams", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	var route string
+	if teamId == "" {
+		route = c.postsRoute() + "/search"
+	} else {
+		route = c.teamRoute(teamId) + "/posts/search"
+	}
+	r, err := c.DoAPIPost(route, string(js))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PostList
+	if r.StatusCode == http.StatusNotModified {
+		return &list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchPostsWithParams", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &list, BuildResponse(r), nil
+}
+
+// SearchPostsWithMatches returns any posts with matching terms string, including which terms matched in each post.
+func (c *Client4) SearchPostsWithMatches(teamId string, terms string, isOrSearch bool) (*PostSearchResults, *Response, error) {
+	requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch}
+	var route string
+	if teamId == "" {
+		route = c.postsRoute() + "/search"
+	} else {
+		route = c.teamRoute(teamId) + "/posts/search"
+	}
+	r, err := c.DoAPIPost(route, StringInterfaceToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var psr PostSearchResults
+	if jsonErr := json.NewDecoder(r.Body).Decode(&psr); jsonErr != nil {
+		return nil, nil, NewAppError("SearchPostsWithMatches", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &psr, BuildResponse(r), nil
+}
+
+// DoPostAction performs a post action.
+func (c *Client4) DoPostAction(postId, actionId string) (*Response, error) {
+	r, err := c.DoAPIPost(c.postRoute(postId)+"/actions/"+actionId, "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DoPostActionWithCookie performs a post action with extra arguments
+func (c *Client4) DoPostActionWithCookie(postId, actionId, selected, cookieStr string) (*Response, error) {
+	var body []byte
+	if selected != "" || cookieStr != "" {
+		body, _ = json.Marshal(DoPostActionRequest{
+			SelectedOption: selected,
+			Cookie:         cookieStr,
+		})
+	}
+	r, err := c.DoAPIPost(c.postRoute(postId)+"/actions/"+actionId, string(body))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// OpenInteractiveDialog sends a WebSocket event to a user's clients to
+// open interactive dialogs, based on the provided trigger ID and other
+// provided data. Used with interactive message buttons, menus and
+// slash commands.
+func (c *Client4) OpenInteractiveDialog(request OpenDialogRequest) (*Response, error) {
+	b, _ := json.Marshal(request)
+	r, err := c.DoAPIPost("/actions/dialogs/open", string(b))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// SubmitInteractiveDialog will submit the provided dialog data to the integration
+// configured by the URL. Used with the interactive dialogs integration feature.
+func (c *Client4) SubmitInteractiveDialog(request SubmitDialogRequest) (*SubmitDialogResponse, *Response, error) { + b, _ := json.Marshal(request) + r, err := c.DoAPIPost("/actions/dialogs/submit", string(b)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var resp SubmitDialogResponse + json.NewDecoder(r.Body).Decode(&resp) + return &resp, BuildResponse(r), nil +} + +// UploadFile will upload a file to a channel using a multipart request, to be later attached to a post. +// This method is functionally equivalent to Client4.UploadFileAsRequestBody. +func (c *Client4) UploadFile(data []byte, channelId string, filename string) (*FileUploadResponse, *Response, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part, err := writer.CreateFormField("channel_id") + if err != nil { + return nil, nil, err + } + + _, err = io.Copy(part, strings.NewReader(channelId)) + if err != nil { + return nil, nil, err + } + + part, err = writer.CreateFormFile("files", filename) + if err != nil { + return nil, nil, err + } + _, err = io.Copy(part, bytes.NewBuffer(data)) + if err != nil { + return nil, nil, err + } + + err = writer.Close() + if err != nil { + return nil, nil, err + } + + return c.DoUploadFile(c.filesRoute(), body.Bytes(), writer.FormDataContentType()) +} + +// UploadFileAsRequestBody will upload a file to a channel as the body of a request, to be later attached +// to a post. This method is functionally equivalent to Client4.UploadFile. +func (c *Client4) UploadFileAsRequestBody(data []byte, channelId string, filename string) (*FileUploadResponse, *Response, error) { + return c.DoUploadFile(c.filesRoute()+fmt.Sprintf("?channel_id=%v&filename=%v", url.QueryEscape(channelId), url.QueryEscape(filename)), data, http.DetectContentType(data)) +} + +// GetFile gets the bytes for a file by id. +func (c *Client4) GetFile(fileId string) ([]byte, *Response, error) { + r, err := c.DoAPIGet(c.fileRoute(fileId), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode) + } + return data, BuildResponse(r), nil +} + +// DownloadFile gets the bytes for a file by id, optionally adding headers to force the browser to download it. +func (c *Client4) DownloadFile(fileId string, download bool) ([]byte, *Response, error) { + r, err := c.DoAPIGet(c.fileRoute(fileId)+fmt.Sprintf("?download=%v", download), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("DownloadFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode) + } + return data, BuildResponse(r), nil +} + +// GetFileThumbnail gets the bytes for a file by id. +func (c *Client4) GetFileThumbnail(fileId string) ([]byte, *Response, error) { + r, err := c.DoAPIGet(c.fileRoute(fileId)+"/thumbnail", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode) + } + return data, BuildResponse(r), nil +} + +// DownloadFileThumbnail gets the bytes for a file by id, optionally adding headers to force the browser to download it. 
+func (c *Client4) DownloadFileThumbnail(fileId string, download bool) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.fileRoute(fileId)+fmt.Sprintf("/thumbnail?download=%v", download), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("DownloadFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// GetFileLink gets the public link of a file by id.
+func (c *Client4) GetFileLink(fileId string) (string, *Response, error) {
+	r, err := c.DoAPIGet(c.fileRoute(fileId)+"/link", "")
+	if err != nil {
+		return "", BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body)["link"], BuildResponse(r), nil
+}
+
+// GetFilePreview gets the bytes for a file by id.
+func (c *Client4) GetFilePreview(fileId string) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.fileRoute(fileId)+"/preview", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// DownloadFilePreview gets the bytes for a file by id, optionally adding headers to force the browser to download it.
+func (c *Client4) DownloadFilePreview(fileId string, download bool) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.fileRoute(fileId)+fmt.Sprintf("/preview?download=%v", download), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("DownloadFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// GetFileInfo gets the file info object for a file by id.
+func (c *Client4) GetFileInfo(fileId string) (*FileInfo, *Response, error) {
+	r, err := c.DoAPIGet(c.fileRoute(fileId)+"/info", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var fi FileInfo
+	if jsonErr := json.NewDecoder(r.Body).Decode(&fi); jsonErr != nil {
+		return nil, nil, NewAppError("GetFileInfo", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &fi, BuildResponse(r), nil
+}
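Editor's usage sketch (not part of the diff): fetching a file's bytes with the helpers above and writing them to disk; the file ID and output path are illustrative.

    func exampleSaveFile(client *model.Client4) error {
        // Illustrative file ID; download=true asks the server for download headers.
        data, _, err := client.DownloadFile("fileidhere", true)
        if err != nil {
            return err
        }
        // Assumes: import "os"
        return os.WriteFile("attachment.bin", data, 0o600)
    }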
+// GetFileInfosForPost gets all the file info objects attached to a post.
+func (c *Client4) GetFileInfosForPost(postId string, etag string) ([]*FileInfo, *Response, error) {
+	r, err := c.DoAPIGet(c.postRoute(postId)+"/files/info", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list []*FileInfo
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetFileInfosForPost", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// General/System Section
+
+// GenerateSupportPacket downloads the generated support packet
+func (c *Client4) GenerateSupportPacket() ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.systemRoute()+"/support_packet", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GenerateSupportPacket", "model.client.read_job_result_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return data, BuildResponse(r), nil
+}
+
+// GetPing will return ok if the number of running goroutines is below the threshold, and unhealthy otherwise.
+func (c *Client4) GetPing() (string, *Response, error) {
+	r, err := c.DoAPIGet(c.systemRoute()+"/ping", "")
+	if r != nil && r.StatusCode == 500 {
+		defer r.Body.Close()
+		return StatusUnhealthy, BuildResponse(r), err
+	}
+	if err != nil {
+		return "", BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body)["status"], BuildResponse(r), nil
+}
+
+// GetPingWithServerStatus will return ok if several basic server health checks
+// all pass successfully.
+func (c *Client4) GetPingWithServerStatus() (string, *Response, error) {
+	r, err := c.DoAPIGet(c.systemRoute()+"/ping?get_server_status="+c.boolString(true), "")
+	if r != nil && r.StatusCode == 500 {
+		defer r.Body.Close()
+		return StatusUnhealthy, BuildResponse(r), err
+	}
+	if err != nil {
+		return "", BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body)["status"], BuildResponse(r), nil
+}
+
+// GetPingWithFullServerStatus will return the full status if several basic server
+// health checks all pass successfully.
+func (c *Client4) GetPingWithFullServerStatus() (map[string]string, *Response, error) {
+	r, err := c.DoAPIGet(c.systemRoute()+"/ping?get_server_status="+c.boolString(true), "")
+	if r != nil && r.StatusCode == 500 {
+		defer r.Body.Close()
+		return map[string]string{"status": StatusUnhealthy}, BuildResponse(r), err
+	}
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// TestEmail will attempt to connect to the configured SMTP server.
+func (c *Client4) TestEmail(config *Config) (*Response, error) {
+	buf, err := json.Marshal(config)
+	if err != nil {
+		return nil, NewAppError("TestEmail", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.testEmailRoute(), buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
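Editor's usage sketch (not part of the diff): a basic health probe built on the ping helpers above, suitable for a readiness check.

    func exampleHealthCheck(client *model.Client4) error {
        status, _, err := client.GetPingWithServerStatus()
        if err != nil {
            return fmt.Errorf("server unhealthy (status %q): %w", status, err)
        }
        fmt.Println("server status:", status)
        return nil
    }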
+// TestSiteURL will test the validity of a site URL.
+func (c *Client4) TestSiteURL(siteURL string) (*Response, error) {
+	requestBody := make(map[string]string)
+	requestBody["site_url"] = siteURL
+	r, err := c.DoAPIPost(c.testSiteURLRoute(), MapToJSON(requestBody))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// TestS3Connection will attempt to connect to AWS S3 with the supplied configuration.
+func (c *Client4) TestS3Connection(config *Config) (*Response, error) {
+	buf, err := json.Marshal(config)
+	if err != nil {
+		return nil, NewAppError("TestS3Connection", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.testS3Route(), buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetConfig will retrieve the server config with some sanitized items.
+func (c *Client4) GetConfig() (*Config, *Response, error) {
+	r, err := c.DoAPIGet(c.configRoute(), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return ConfigFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// ReloadConfig will reload the server configuration.
+func (c *Client4) ReloadConfig() (*Response, error) {
+	r, err := c.DoAPIPost(c.configRoute()+"/reload", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetOldClientConfig will retrieve the parts of the server configuration needed by the
+// client, formatted in the old format.
+func (c *Client4) GetOldClientConfig(etag string) (map[string]string, *Response, error) {
+	r, err := c.DoAPIGet(c.configRoute()+"/client?format=old", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// GetEnvironmentConfig will retrieve a map mirroring the server configuration where fields
+// are set to true if the corresponding config setting is set through an environment variable.
+// Settings that haven't been set through environment variables will be missing from the map.
+func (c *Client4) GetEnvironmentConfig() (map[string]interface{}, *Response, error) {
+	r, err := c.DoAPIGet(c.configRoute()+"/environment", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return StringInterfaceFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// GetOldClientLicense will retrieve the parts of the server license needed by the
+// client, formatted in the old format.
+func (c *Client4) GetOldClientLicense(etag string) (map[string]string, *Response, error) {
+	r, err := c.DoAPIGet(c.licenseRoute()+"/client?format=old", etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// DatabaseRecycle will recycle the database connections, discarding the current ones and acquiring new ones.
+func (c *Client4) DatabaseRecycle() (*Response, error) {
+	r, err := c.DoAPIPost(c.databaseRoute()+"/recycle", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// InvalidateCaches will purge the server caches, which can affect performance while they are rebuilt.
+func (c *Client4) InvalidateCaches() (*Response, error) {
+	r, err := c.DoAPIPost(c.cacheRoute()+"/invalidate", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
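Editor's usage sketch (not part of the diff): a get-modify-update round trip over the config endpoints above. Note that GetConfig returns a config with some items sanitized, so a real round trip must take care not to write sanitized placeholders back; the SiteURL field shown is a real *string in model.Config, the value is illustrative.

    func exampleUpdateSiteURL(client *model.Client4, siteURL string) error {
        cfg, _, err := client.GetConfig()
        if err != nil {
            return err
        }
        cfg.ServiceSettings.SiteURL = &siteURL // e.g. "https://mattermost.example.com"
        _, _, err = client.UpdateConfig(cfg)
        return err
    }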
+// UpdateConfig will update the server configuration.
+func (c *Client4) UpdateConfig(config *Config) (*Config, *Response, error) {
+	buf, err := json.Marshal(config)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateConfig", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.configRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return ConfigFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// MigrateConfig will migrate existing config to the new one.
+// DEPRECATED: The config migrate API has been moved to be a purely
+// mmctl --local endpoint. This method will be removed in a
+// future major release.
+func (c *Client4) MigrateConfig(from, to string) (*Response, error) {
+	m := make(map[string]string, 2)
+	m["from"] = from
+	m["to"] = to
+	r, err := c.DoAPIPost(c.configRoute()+"/migrate", MapToJSON(m))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// UploadLicenseFile will add a license file to the system.
+func (c *Client4) UploadLicenseFile(data []byte) (*Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("license", "test-license.mattermost-license")
+	if err != nil {
+		return nil, NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+		return nil, NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if err = writer.Close(); err != nil {
+		return nil, NewAppError("UploadLicenseFile", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	rq, err := http.NewRequest("POST", c.APIURL+c.licenseRoute(), bytes.NewReader(body.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+	rq.Header.Set("Content-Type", writer.FormDataContentType())
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	return BuildResponse(rp), nil
+}
+
+// RemoveLicenseFile will remove the server license, if it exists. Note that this will
+// disable all enterprise features.
+func (c *Client4) RemoveLicenseFile() (*Response, error) {
+	r, err := c.DoAPIDelete(c.licenseRoute())
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetAnalyticsOld will retrieve analytics using the old format. New format is not
+// available but the "/analytics" endpoint is reserved for it. The "name" argument is optional
+// and defaults to "standard". The "teamId" argument is optional and will limit results
+// to a specific team.
+func (c *Client4) GetAnalyticsOld(name, teamId string) (AnalyticsRows, *Response, error) { + query := fmt.Sprintf("?name=%v&team_id=%v", name, teamId) + r, err := c.DoAPIGet(c.analyticsRoute()+"/old"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var rows AnalyticsRows + err = json.NewDecoder(r.Body).Decode(&rows) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetAnalyticsOld", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return rows, BuildResponse(r), nil +} + +// Webhooks Section + +// CreateIncomingWebhook creates an incoming webhook for a channel. +func (c *Client4) CreateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response, error) { + buf, err := json.Marshal(hook) + if err != nil { + return nil, nil, NewAppError("CreateIncomingWebhook", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.incomingWebhooksRoute(), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var iw IncomingWebhook + if jsonErr := json.NewDecoder(r.Body).Decode(&iw); jsonErr != nil { + return nil, nil, NewAppError("CreateIncomingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &iw, BuildResponse(r), nil +} + +// UpdateIncomingWebhook updates an incoming webhook for a channel. +func (c *Client4) UpdateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response, error) { + buf, err := json.Marshal(hook) + if err != nil { + return nil, nil, NewAppError("UpdateIncomingWebhook", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.incomingWebhookRoute(hook.Id), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var iw IncomingWebhook + if jsonErr := json.NewDecoder(r.Body).Decode(&iw); jsonErr != nil { + return nil, nil, NewAppError("UpdateIncomingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &iw, BuildResponse(r), nil +} + +// GetIncomingWebhooks returns a page of incoming webhooks on the system. Page counting starts at 0. +func (c *Client4) GetIncomingWebhooks(page int, perPage int, etag string) ([]*IncomingWebhook, *Response, error) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + r, err := c.DoAPIGet(c.incomingWebhooksRoute()+query, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var iwl []*IncomingWebhook + if r.StatusCode == http.StatusNotModified { + return iwl, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&iwl); jsonErr != nil { + return nil, nil, NewAppError("GetIncomingWebhooks", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return iwl, BuildResponse(r), nil +} + +// GetIncomingWebhooksForTeam returns a page of incoming webhooks for a team. Page counting starts at 0. 
+func (c *Client4) GetIncomingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*IncomingWebhook, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
+	r, err := c.DoAPIGet(c.incomingWebhooksRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var iwl []*IncomingWebhook
+	if r.StatusCode == http.StatusNotModified {
+		return iwl, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&iwl); jsonErr != nil {
+		return nil, nil, NewAppError("GetIncomingWebhooksForTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return iwl, BuildResponse(r), nil
+}
+
+// GetIncomingWebhook returns an Incoming webhook given the hook ID.
+func (c *Client4) GetIncomingWebhook(hookID string, etag string) (*IncomingWebhook, *Response, error) {
+	r, err := c.DoAPIGet(c.incomingWebhookRoute(hookID), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var iw IncomingWebhook
+	if r.StatusCode == http.StatusNotModified {
+		return &iw, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&iw); jsonErr != nil {
+		return nil, nil, NewAppError("GetIncomingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &iw, BuildResponse(r), nil
+}
+
+// DeleteIncomingWebhook deletes an incoming webhook given the hook ID.
+func (c *Client4) DeleteIncomingWebhook(hookID string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.incomingWebhookRoute(hookID))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// CreateOutgoingWebhook creates an outgoing webhook for a team or channel.
+func (c *Client4) CreateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response, error) {
+	buf, err := json.Marshal(hook)
+	if err != nil {
+		return nil, nil, NewAppError("CreateOutgoingWebhook", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.outgoingWebhooksRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ow OutgoingWebhook
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ow); jsonErr != nil {
+		return nil, nil, NewAppError("CreateOutgoingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ow, BuildResponse(r), nil
+}
+
+// UpdateOutgoingWebhook updates an outgoing webhook for a team or channel.
+func (c *Client4) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response, error) {
+	buf, err := json.Marshal(hook)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateOutgoingWebhook", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.outgoingWebhookRoute(hook.Id), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ow OutgoingWebhook
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ow); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateOutgoingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ow, BuildResponse(r), nil
+}
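Editor's usage sketch (not part of the diff): creating an incoming webhook with the helper above; the channel ID is illustrative, and the server fills in the hook ID on success.

    func exampleCreateIncomingHook(client *model.Client4) error {
        hook := &model.IncomingWebhook{
            ChannelId:   "channelidhere", // illustrative channel ID
            DisplayName: "CI notifications",
        }
        created, _, err := client.CreateIncomingWebhook(hook)
        if err != nil {
            return err
        }
        fmt.Println("post payloads to /hooks/" + created.Id)
        return nil
    }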
+// GetOutgoingWebhooks returns a page of outgoing webhooks on the system. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooks(page int, perPage int, etag string) ([]*OutgoingWebhook, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.outgoingWebhooksRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var owl []*OutgoingWebhook
+	if r.StatusCode == http.StatusNotModified {
+		return owl, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&owl); jsonErr != nil {
+		return nil, nil, NewAppError("GetOutgoingWebhooks", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return owl, BuildResponse(r), nil
+}
+
+// GetOutgoingWebhook returns the outgoing webhook with the given hook ID.
+func (c *Client4) GetOutgoingWebhook(hookId string) (*OutgoingWebhook, *Response, error) {
+	r, err := c.DoAPIGet(c.outgoingWebhookRoute(hookId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ow OutgoingWebhook
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ow); jsonErr != nil {
+		return nil, nil, NewAppError("GetOutgoingWebhook", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ow, BuildResponse(r), nil
+}
+
+// GetOutgoingWebhooksForChannel returns a page of outgoing webhooks for a channel. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooksForChannel(channelId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&channel_id=%v", page, perPage, channelId)
+	r, err := c.DoAPIGet(c.outgoingWebhooksRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var owl []*OutgoingWebhook
+	if r.StatusCode == http.StatusNotModified {
+		return owl, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&owl); jsonErr != nil {
+		return nil, nil, NewAppError("GetOutgoingWebhooksForChannel", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return owl, BuildResponse(r), nil
+}
+
+// GetOutgoingWebhooksForTeam returns a page of outgoing webhooks for a team. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
+	r, err := c.DoAPIGet(c.outgoingWebhooksRoute()+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var owl []*OutgoingWebhook
+	if r.StatusCode == http.StatusNotModified {
+		return owl, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&owl); jsonErr != nil {
+		return nil, nil, NewAppError("GetOutgoingWebhooksForTeam", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return owl, BuildResponse(r), nil
+}
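Editor's usage sketch (not part of the diff): the paged listers above all follow the same page/perPage convention starting at page 0, so exhausting them is a simple loop that stops on a short page.

    func exampleListAllOutgoingHooks(client *model.Client4) ([]*model.OutgoingWebhook, error) {
        const perPage = 50
        var all []*model.OutgoingWebhook
        for page := 0; ; page++ {
            hooks, _, err := client.GetOutgoingWebhooks(page, perPage, "")
            if err != nil {
                return nil, err
            }
            all = append(all, hooks...)
            if len(hooks) < perPage { // short page means no more results
                return all, nil
            }
        }
    }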
+// RegenOutgoingHookToken regenerates the outgoing webhook token.
+func (c *Client4) RegenOutgoingHookToken(hookId string) (*OutgoingWebhook, *Response, error) {
+	r, err := c.DoAPIPost(c.outgoingWebhookRoute(hookId)+"/regen_token", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var ow OutgoingWebhook
+	if jsonErr := json.NewDecoder(r.Body).Decode(&ow); jsonErr != nil {
+		return nil, nil, NewAppError("RegenOutgoingHookToken", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &ow, BuildResponse(r), nil
+}
+
+// DeleteOutgoingWebhook deletes the outgoing webhook with the given hook ID.
+func (c *Client4) DeleteOutgoingWebhook(hookId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.outgoingWebhookRoute(hookId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// Preferences Section
+
+// GetPreferences returns the user's preferences.
+func (c *Client4) GetPreferences(userId string) (Preferences, *Response, error) {
+	r, err := c.DoAPIGet(c.preferencesRoute(userId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var prefs Preferences
+	if jsonErr := json.NewDecoder(r.Body).Decode(&prefs); jsonErr != nil {
+		return nil, nil, NewAppError("GetPreferences", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return prefs, BuildResponse(r), nil
+}
+
+// UpdatePreferences saves the user's preferences.
+func (c *Client4) UpdatePreferences(userId string, preferences Preferences) (*Response, error) {
+	buf, err := json.Marshal(preferences)
+	if err != nil {
+		return nil, NewAppError("UpdatePreferences", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.preferencesRoute(userId), buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DeletePreferences deletes the user's preferences.
+func (c *Client4) DeletePreferences(userId string, preferences Preferences) (*Response, error) {
+	buf, err := json.Marshal(preferences)
+	if err != nil {
+		return nil, NewAppError("DeletePreferences", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.preferencesRoute(userId)+"/delete", buf)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetPreferencesByCategory returns the user's preferences from the provided category string.
+func (c *Client4) GetPreferencesByCategory(userId string, category string) (Preferences, *Response, error) {
+	url := fmt.Sprintf(c.preferencesRoute(userId)+"/%s", category)
+	r, err := c.DoAPIGet(url, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var prefs Preferences
+	if jsonErr := json.NewDecoder(r.Body).Decode(&prefs); jsonErr != nil {
+		return nil, nil, NewAppError("GetPreferencesByCategory", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return prefs, BuildResponse(r), nil
+}
+
+// GetPreferenceByCategoryAndName returns the user's preference from the provided category and preference name string.
+func (c *Client4) GetPreferenceByCategoryAndName(userId string, category string, preferenceName string) (*Preference, *Response, error) { + url := fmt.Sprintf(c.preferencesRoute(userId)+"/%s/name/%v", category, preferenceName) + r, err := c.DoAPIGet(url, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var pref Preference + if jsonErr := json.NewDecoder(r.Body).Decode(&pref); jsonErr != nil { + return nil, nil, NewAppError("GetPreferenceByCategoryAndName", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &pref, BuildResponse(r), nil +} + +// SAML Section + +// GetSamlMetadata returns metadata for the SAML configuration. +func (c *Client4) GetSamlMetadata() (string, *Response, error) { + r, err := c.DoAPIGet(c.samlRoute()+"/metadata", "") + if err != nil { + return "", BuildResponse(r), err + } + defer closeBody(r) + + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(r.Body) + if err != nil { + return "", BuildResponse(r), err + } + + return buf.String(), BuildResponse(r), nil +} + +func fileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part, err := writer.CreateFormFile("certificate", filename) + if err != nil { + return nil, nil, err + } + + if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return nil, nil, err + } + + if err := writer.Close(); err != nil { + return nil, nil, err + } + + return body.Bytes(), writer, nil +} + +// UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it. +// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. +func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (*Response, error) { + body, writer, err := fileToMultipart(data, filename) + if err != nil { + return nil, NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest) + } + + _, resp, err := c.DoUploadFile(c.samlRoute()+"/certificate/idp", body, writer.FormDataContentType()) + return resp, err +} + +// UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it. +// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. +func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (*Response, error) { + body, writer, err := fileToMultipart(data, filename) + if err != nil { + return nil, NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest) + } + + _, resp, err := c.DoUploadFile(c.samlRoute()+"/certificate/public", body, writer.FormDataContentType()) + return resp, err +} + +// UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it. +// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. 
+func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (*Response, error) {
+	body, writer, err := fileToMultipart(data, filename)
+	if err != nil {
+		return nil, NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	_, resp, err := c.DoUploadFile(c.samlRoute()+"/certificate/private", body, writer.FormDataContentType())
+	return resp, err
+}
+
+// DeleteSamlIdpCertificate deletes the SAML IDP certificate from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlIdpCertificate() (*Response, error) {
+	r, err := c.DoAPIDelete(c.samlRoute() + "/certificate/idp")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DeleteSamlPublicCertificate deletes the SAML public certificate from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlPublicCertificate() (*Response, error) {
+	r, err := c.DoAPIDelete(c.samlRoute() + "/certificate/public")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DeleteSamlPrivateCertificate deletes the SAML private key from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlPrivateCertificate() (*Response, error) {
+	r, err := c.DoAPIDelete(c.samlRoute() + "/certificate/private")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetSamlCertificateStatus returns the status of the uploaded SAML certificates.
+func (c *Client4) GetSamlCertificateStatus() (*SamlCertificateStatus, *Response, error) {
+	r, err := c.DoAPIGet(c.samlRoute()+"/certificate/status", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var status SamlCertificateStatus
+	if jsonErr := json.NewDecoder(r.Body).Decode(&status); jsonErr != nil {
+		return nil, nil, NewAppError("GetSamlCertificateStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &status, BuildResponse(r), nil
+}
+
+func (c *Client4) GetSamlMetadataFromIdp(samlMetadataURL string) (*SamlMetadataResponse, *Response, error) {
+	requestBody := make(map[string]string)
+	requestBody["saml_metadata_url"] = samlMetadataURL
+	r, err := c.DoAPIPost(c.samlRoute()+"/metadatafromidp", MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+
+	defer closeBody(r)
+	var resp SamlMetadataResponse
+	if jsonErr := json.NewDecoder(r.Body).Decode(&resp); jsonErr != nil {
+		return nil, nil, NewAppError("GetSamlMetadataFromIdp", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &resp, BuildResponse(r), nil
+}
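Editor's usage sketch (not part of the diff): uploading an IDP certificate and then checking its status with the helpers above; the certificate path is illustrative, and as noted in the doc comments the filename argument is ignored by the server.

    func exampleUploadIdpCert(client *model.Client4, path string) error {
        data, err := os.ReadFile(path) // assumes: import "os"
        if err != nil {
            return err
        }
        if _, err := client.UploadSamlIdpCertificate(data, "idp.crt"); err != nil {
            return err
        }
        status, _, err := client.GetSamlCertificateStatus()
        if err != nil {
            return err
        }
        fmt.Printf("certificate status: %+v\n", status)
        return nil
    }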
+// ResetSamlAuthDataToEmail resets the AuthData field of SAML users to their Email.
+func (c *Client4) ResetSamlAuthDataToEmail(includeDeleted bool, dryRun bool, userIDs []string) (int64, *Response, error) {
+	params := map[string]interface{}{
+		"include_deleted": includeDeleted,
+		"dry_run":         dryRun,
+		"user_ids":        userIDs,
+	}
+	b, _ := json.Marshal(params)
+	r, err := c.DoAPIPostBytes(c.samlRoute()+"/reset_auth_data", b)
+	if err != nil {
+		return 0, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	respBody := map[string]int64{}
+	err = json.NewDecoder(r.Body).Decode(&respBody)
+	if err != nil {
+		return 0, BuildResponse(r), NewAppError("Api4.ResetSamlAuthDataToEmail", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return respBody["num_affected"], BuildResponse(r), nil
+}
+
+// Compliance Section
+
+// CreateComplianceReport creates a compliance report.
+func (c *Client4) CreateComplianceReport(report *Compliance) (*Compliance, *Response, error) {
+	buf, err := json.Marshal(report)
+	if err != nil {
+		return nil, nil, NewAppError("CreateComplianceReport", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.complianceReportsRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var comp Compliance
+	if jsonErr := json.NewDecoder(r.Body).Decode(&comp); jsonErr != nil {
+		return nil, nil, NewAppError("CreateComplianceReport", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &comp, BuildResponse(r), nil
+}
+
+// GetComplianceReports returns list of compliance reports.
+func (c *Client4) GetComplianceReports(page, perPage int) (Compliances, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.complianceReportsRoute()+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var comp Compliances
+	if jsonErr := json.NewDecoder(r.Body).Decode(&comp); jsonErr != nil {
+		return nil, nil, NewAppError("GetComplianceReports", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return comp, BuildResponse(r), nil
+}
+
+// GetComplianceReport returns a compliance report.
+func (c *Client4) GetComplianceReport(reportId string) (*Compliance, *Response, error) {
+	r, err := c.DoAPIGet(c.complianceReportRoute(reportId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var comp Compliance
+	if jsonErr := json.NewDecoder(r.Body).Decode(&comp); jsonErr != nil {
+		return nil, nil, NewAppError("GetComplianceReport", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &comp, BuildResponse(r), nil
+}
+
+// DownloadComplianceReport returns a full compliance report as a file.
+func (c *Client4) DownloadComplianceReport(reportId string) ([]byte, *Response, error) { + rq, err := http.NewRequest("GET", c.APIURL+c.complianceReportDownloadRoute(reportId), nil) + if err != nil { + return nil, nil, err + } + + if c.AuthToken != "" { + rq.Header.Set(HeaderAuth, "BEARER "+c.AuthToken) + } + + rp, err := c.HTTPClient.Do(rq) + if err != nil { + return nil, BuildResponse(rp), err + } + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body) + } + + data, err := ioutil.ReadAll(rp.Body) + if err != nil { + return nil, BuildResponse(rp), NewAppError("DownloadComplianceReport", "model.client.read_file.app_error", nil, err.Error(), rp.StatusCode) + } + + return data, BuildResponse(rp), nil +} + +// Cluster Section + +// GetClusterStatus returns the status of all the configured cluster nodes. +func (c *Client4) GetClusterStatus() ([]*ClusterInfo, *Response, error) { + r, err := c.DoAPIGet(c.clusterRoute()+"/status", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*ClusterInfo + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetClusterStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// LDAP Section + +// SyncLdap will force a sync with the configured LDAP server. +// If includeRemovedMembers is true, then group members who left or were removed from a +// synced team/channel will be re-joined; otherwise, they will be excluded. +func (c *Client4) SyncLdap(includeRemovedMembers bool) (*Response, error) { + reqBody, _ := json.Marshal(map[string]interface{}{ + "include_removed_members": includeRemovedMembers, + }) + r, err := c.DoAPIPostBytes(c.ldapRoute()+"/sync", reqBody) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// TestLdap will attempt to connect to the configured LDAP server and return OK if configured +// correctly. +func (c *Client4) TestLdap() (*Response, error) { + r, err := c.DoAPIPost(c.ldapRoute()+"/test", "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetLdapGroups retrieves the immediate child groups of the given parent group. +func (c *Client4) GetLdapGroups() ([]*Group, *Response, error) { + path := fmt.Sprintf("%s/groups", c.ldapRoute()) + + r, err := c.DoAPIGet(path, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + responseData := struct { + Count int `json:"count"` + Groups []*Group `json:"groups"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + return nil, BuildResponse(r), NewAppError("Api4.GetLdapGroups", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + for i := range responseData.Groups { + responseData.Groups[i].DisplayName = *responseData.Groups[i].Name + } + + return responseData.Groups, BuildResponse(r), nil +} + +// LinkLdapGroup creates or undeletes a Mattermost group and associates it to the given LDAP group DN. 
+func (c *Client4) LinkLdapGroup(dn string) (*Group, *Response, error) { + path := fmt.Sprintf("%s/groups/%s/link", c.ldapRoute(), dn) + + r, err := c.DoAPIPost(path, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var g Group + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("LinkLdapGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &g, BuildResponse(r), nil +} + +// UnlinkLdapGroup deletes the Mattermost group associated with the given LDAP group DN. +func (c *Client4) UnlinkLdapGroup(dn string) (*Group, *Response, error) { + path := fmt.Sprintf("%s/groups/%s/link", c.ldapRoute(), dn) + + r, err := c.DoAPIDelete(path) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var g Group + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("UnlinkLdapGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &g, BuildResponse(r), nil +} + +// MigrateIdLdap migrates the LDAP enabled users to given attribute +func (c *Client4) MigrateIdLdap(toAttribute string) (*Response, error) { + r, err := c.DoAPIPost(c.ldapRoute()+"/migrateid", MapToJSON(map[string]string{ + "toAttribute": toAttribute, + })) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetGroupsByChannel retrieves the Mattermost Groups associated with a given channel +func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response, error) { + path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.channelRoute(channelId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference) + if opts.PageOpts != nil { + path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) + } + r, err := c.DoAPIGet(path, "") + if err != nil { + return nil, 0, BuildResponse(r), err + } + defer closeBody(r) + + responseData := struct { + Groups []*GroupWithSchemeAdmin `json:"groups"` + Count int `json:"total_group_count"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + return nil, 0, BuildResponse(r), NewAppError("Api4.GetGroupsByChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + + return responseData.Groups, responseData.Count, BuildResponse(r), nil +} + +// GetGroupsByTeam retrieves the Mattermost Groups associated with a given team +func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response, error) { + path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.teamRoute(teamId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference) + if opts.PageOpts != nil { + path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) + } + r, err := c.DoAPIGet(path, "") + if err != nil { + return nil, 0, BuildResponse(r), err + } + defer closeBody(r) + + responseData := struct { + Groups []*GroupWithSchemeAdmin `json:"groups"` + Count int `json:"total_group_count"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + return nil, 0, BuildResponse(r), NewAppError("Api4.GetGroupsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + + return responseData.Groups, responseData.Count, 
BuildResponse(r), nil
+}
+
+// GetGroupsAssociatedToChannelsByTeam retrieves the Mattermost Groups associated with channels in a given team
+func (c *Client4) GetGroupsAssociatedToChannelsByTeam(teamId string, opts GroupSearchOpts) (map[string][]*GroupWithSchemeAdmin, *Response, error) {
+	path := fmt.Sprintf("%s/groups_by_channels?q=%v&filter_allow_reference=%v", c.teamRoute(teamId), opts.Q, opts.FilterAllowReference)
+	if opts.PageOpts != nil {
+		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
+	}
+	r, err := c.DoAPIGet(path, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	responseData := struct {
+		GroupsAssociatedToChannels map[string][]*GroupWithSchemeAdmin `json:"groups"`
+	}{}
+	if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil {
+		return nil, BuildResponse(r), NewAppError("Api4.GetGroupsAssociatedToChannelsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return responseData.GroupsAssociatedToChannels, BuildResponse(r), nil
+}
+
+// GetGroups retrieves Mattermost Groups
+func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response, error) {
+	path := fmt.Sprintf(
+		"%s?include_member_count=%v&not_associated_to_team=%v&not_associated_to_channel=%v&filter_allow_reference=%v&q=%v&filter_parent_team_permitted=%v&group_source=%v",
+		c.groupsRoute(),
+		opts.IncludeMemberCount,
+		opts.NotAssociatedToTeam,
+		opts.NotAssociatedToChannel,
+		opts.FilterAllowReference,
+		opts.Q,
+		opts.FilterParentTeamPermitted,
+		opts.Source,
+	)
+	if opts.Since > 0 {
+		path = fmt.Sprintf("%s&since=%v", path, opts.Since)
+	}
+	if opts.PageOpts != nil {
+		path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage)
+	}
+	r, err := c.DoAPIGet(path, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list []*Group
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetGroups", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// GetGroupsByUserId retrieves Mattermost Groups for a user
+func (c *Client4) GetGroupsByUserId(userId string) ([]*Group, *Response, error) {
+	path := fmt.Sprintf(
+		"%s/%v/groups",
+		c.usersRoute(),
+		userId,
+	)
+
+	r, err := c.DoAPIGet(path, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Group
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetGroupsByUserId", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+func (c *Client4) MigrateAuthToLdap(fromAuthService string, matchField string, force bool) (*Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/migrate_auth/ldap", StringInterfaceToJSON(map[string]interface{}{
+		"from":        fromAuthService,
+		"force":       force,
+		"match_field": matchField,
+	}))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+func (c *Client4) MigrateAuthToSaml(fromAuthService string, usersMap map[string]string, auto bool) (*Response, error) {
+	r, err := c.DoAPIPost(c.usersRoute()+"/migrate_auth/saml", StringInterfaceToJSON(map[string]interface{}{
+		"from":    fromAuthService,
+		"auto":    auto,
+		"matches": usersMap,
+	}))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
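Editor's usage sketch (not part of the diff): querying groups with the GroupSearchOpts fields consumed by GetGroups above; the search term is illustrative, and the PageOpts type name is taken from how it is dereferenced in this file.

    func exampleListGroups(client *model.Client4) error {
        opts := model.GroupSearchOpts{
            Q:                  "eng", // illustrative search term
            IncludeMemberCount: true,
            PageOpts:           &model.PageOpts{Page: 0, PerPage: 100},
        }
        groups, _, err := client.GetGroups(opts)
        if err != nil {
            return err
        }
        for _, g := range groups {
            fmt.Println(g.DisplayName)
        }
        return nil
    }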
+
+// UploadLdapPublicCertificate will upload a public certificate for LDAP and set the config to use it.
+func (c *Client4) UploadLdapPublicCertificate(data []byte) (*Response, error) {
+	body, writer, err := fileToMultipart(data, LdapPublicCertificateName)
+	if err != nil {
+		return nil, NewAppError("UploadLdapPublicCertificate", "model.client.upload_ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	_, resp, err := c.DoUploadFile(c.ldapRoute()+"/certificate/public", body, writer.FormDataContentType())
+	return resp, err
+}
+
+// UploadLdapPrivateCertificate will upload a private key for LDAP and set the config to use it.
+func (c *Client4) UploadLdapPrivateCertificate(data []byte) (*Response, error) {
+	body, writer, err := fileToMultipart(data, LdapPrivateKeyName)
+	if err != nil {
+		return nil, NewAppError("UploadLdapPrivateCertificate", "model.client.upload_Ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	_, resp, err := c.DoUploadFile(c.ldapRoute()+"/certificate/private", body, writer.FormDataContentType())
+	return resp, err
+}
+
+// DeleteLdapPublicCertificate deletes the LDAP public certificate from the server and updates the config to not use it and disable LDAP.
+func (c *Client4) DeleteLdapPublicCertificate() (*Response, error) {
+	r, err := c.DoAPIDelete(c.ldapRoute() + "/certificate/public")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DeleteLdapPrivateCertificate deletes the LDAP private key from the server and updates the config to not use it and disable LDAP.
+func (c *Client4) DeleteLdapPrivateCertificate() (*Response, error) {
+	r, err := c.DoAPIDelete(c.ldapRoute() + "/certificate/private")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// Audits Section
+
+// GetAudits returns a list of audits for the whole system.
+func (c *Client4) GetAudits(page int, perPage int, etag string) (Audits, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet("/audits"+query, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var audits Audits
+	err = json.NewDecoder(r.Body).Decode(&audits)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetAudits", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return audits, BuildResponse(r), nil
+}
+
+// Brand Section
+
+// GetBrandImage retrieves the previously uploaded brand image.
+func (c *Client4) GetBrandImage() ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.brandRoute()+"/image", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	if r.StatusCode >= 300 {
+		return nil, BuildResponse(r), AppErrorFromJSON(r.Body)
+	}
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetBrandImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+
+	return data, BuildResponse(r), nil
+}
+
+// DeleteBrandImage deletes the brand image for the system.
+func (c *Client4) DeleteBrandImage() (*Response, error) {
+	r, err := c.DoAPIDelete(c.brandRoute() + "/image")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
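Editor's usage sketch (not part of the diff): fetching the first page of system audits with the helper above; page size is illustrative.

    func exampleLatestAudits(client *model.Client4) error {
        audits, _, err := client.GetAudits(0, 100, "")
        if err != nil {
            return err
        }
        fmt.Println("fetched", len(audits), "audit entries")
        return nil
    }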
+// UploadBrandImage sets the brand image for the system.
+func (c *Client4) UploadBrandImage(data []byte) (*Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("image", "brand.png")
+	if err != nil {
+		return nil, NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+		return nil, NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	if err = writer.Close(); err != nil {
+		return nil, NewAppError("UploadBrandImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)
+	}
+
+	rq, err := http.NewRequest("POST", c.APIURL+c.brandRoute()+"/image", bytes.NewReader(body.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+	rq.Header.Set("Content-Type", writer.FormDataContentType())
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	return BuildResponse(rp), nil
+}
+
+// Logs Section
+
+// GetLogs gets a page of logs as a string array.
+func (c *Client4) GetLogs(page, perPage int) ([]string, *Response, error) {
+	query := fmt.Sprintf("?page=%v&logs_per_page=%v", page, perPage)
+	r, err := c.DoAPIGet("/logs"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return ArrayFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// PostLog is a convenience Web Service call so clients can log messages into
+// the server-side logs. For example, we typically log javascript error messages
+// into the server-side logs. It returns the log message if the logging was successful.
+func (c *Client4) PostLog(message map[string]string) (map[string]string, *Response, error) {
+	r, err := c.DoAPIPost("/logs", MapToJSON(message))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// OAuth Section
+
+// CreateOAuthApp will register a new OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) CreateOAuthApp(app *OAuthApp) (*OAuthApp, *Response, error) {
+	buf, err := json.Marshal(app)
+	if err != nil {
+		return nil, nil, NewAppError("CreateOAuthApp", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.oAuthAppsRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var oapp OAuthApp
+	if jsonErr := json.NewDecoder(r.Body).Decode(&oapp); jsonErr != nil {
+		return nil, nil, NewAppError("CreateOAuthApp", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &oapp, BuildResponse(r), nil
+}
+
+// UpdateOAuthApp updates a registered OAuth 2.0 client application, with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) UpdateOAuthApp(app *OAuthApp) (*OAuthApp, *Response, error) { + buf, err := json.Marshal(app) + if err != nil { + return nil, nil, NewAppError("UpdateOAuthApp", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.oAuthAppRoute(app.Id), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var oapp OAuthApp + if jsonErr := json.NewDecoder(r.Body).Decode(&oapp); jsonErr != nil { + return nil, nil, NewAppError("UpdateOAuthApp", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &oapp, BuildResponse(r), nil +} + +// GetOAuthApps gets a page of registered OAuth 2.0 client applications with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) GetOAuthApps(page, perPage int) ([]*OAuthApp, *Response, error) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + r, err := c.DoAPIGet(c.oAuthAppsRoute()+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*OAuthApp + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetOAuthApps", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// GetOAuthApp gets a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) GetOAuthApp(appId string) (*OAuthApp, *Response, error) { + r, err := c.DoAPIGet(c.oAuthAppRoute(appId), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var oapp OAuthApp + if jsonErr := json.NewDecoder(r.Body).Decode(&oapp); jsonErr != nil { + return nil, nil, NewAppError("GetOAuthApp", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &oapp, BuildResponse(r), nil +} + +// GetOAuthAppInfo gets a sanitized version of a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) GetOAuthAppInfo(appId string) (*OAuthApp, *Response, error) { + r, err := c.DoAPIGet(c.oAuthAppRoute(appId)+"/info", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var oapp OAuthApp + if jsonErr := json.NewDecoder(r.Body).Decode(&oapp); jsonErr != nil { + return nil, nil, NewAppError("GetOAuthAppInfo", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &oapp, BuildResponse(r), nil +} + +// DeleteOAuthApp deletes a registered OAuth 2.0 client application. +func (c *Client4) DeleteOAuthApp(appId string) (*Response, error) { + r, err := c.DoAPIDelete(c.oAuthAppRoute(appId)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// RegenerateOAuthAppSecret regenerates the client secret for a registered OAuth 2.0 client application. 
+func (c *Client4) RegenerateOAuthAppSecret(appId string) (*OAuthApp, *Response, error) {
+	r, err := c.DoAPIPost(c.oAuthAppRoute(appId)+"/regen_secret", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var oapp OAuthApp
+	if jsonErr := json.NewDecoder(r.Body).Decode(&oapp); jsonErr != nil {
+		return nil, nil, NewAppError("RegenerateOAuthAppSecret", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &oapp, BuildResponse(r), nil
+}
+
+// GetAuthorizedOAuthAppsForUser gets a page of OAuth 2.0 client applications the user has authorized to access their account.
+func (c *Client4) GetAuthorizedOAuthAppsForUser(userId string, page, perPage int) ([]*OAuthApp, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.userRoute(userId)+"/oauth/apps/authorized"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*OAuthApp
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetAuthorizedOAuthAppsForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// AuthorizeOAuthApp will authorize an OAuth 2.0 client application to access a user's account and provide a redirect link to follow.
+func (c *Client4) AuthorizeOAuthApp(authRequest *AuthorizeRequest) (string, *Response, error) {
+	buf, err := json.Marshal(authRequest)
+	if err != nil {
+		return "", BuildResponse(nil), NewAppError("AuthorizeOAuthApp", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIRequestBytes(http.MethodPost, c.URL+"/oauth/authorize", buf, "")
+	if err != nil {
+		return "", BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body)["redirect"], BuildResponse(r), nil
+}
+
+// DeauthorizeOAuthApp will deauthorize an OAuth 2.0 client application from accessing a user's account.
+func (c *Client4) DeauthorizeOAuthApp(appId string) (*Response, error) {
+	requestData := map[string]string{"client_id": appId}
+	r, err := c.DoAPIRequest(http.MethodPost, c.URL+"/oauth/deauthorize", MapToJSON(requestData), "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetOAuthAccessToken is a test helper function for the OAuth access token endpoint.
+func (c *Client4) GetOAuthAccessToken(data url.Values) (*AccessResponse, *Response, error) {
+	url := c.URL + "/oauth/access_token"
+	rq, err := http.NewRequest(http.MethodPost, url, strings.NewReader(data.Encode()))
+	if err != nil {
+		return nil, nil, err
+	}
+	rq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return nil, BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	var ar *AccessResponse
+	err = json.NewDecoder(rp.Body).Decode(&ar)
+	if err != nil {
+		return nil, BuildResponse(rp), NewAppError(url, "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+
+	return ar, BuildResponse(rp), nil
+}
+
+// Elasticsearch Section
+
+// TestElasticsearch will attempt to connect to the configured Elasticsearch server and return OK if configured correctly.
+func (c *Client4) TestElasticsearch() (*Response, error) { + r, err := c.DoAPIPost(c.elasticsearchRoute()+"/test", "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// PurgeElasticsearchIndexes immediately deletes all Elasticsearch indexes. +func (c *Client4) PurgeElasticsearchIndexes() (*Response, error) { + r, err := c.DoAPIPost(c.elasticsearchRoute()+"/purge_indexes", "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// Bleve Section + +// PurgeBleveIndexes immediately deletes all Bleve indexes. +func (c *Client4) PurgeBleveIndexes() (*Response, error) { + r, err := c.DoAPIPost(c.bleveRoute()+"/purge_indexes", "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// Data Retention Section + +// GetDataRetentionPolicy will get the current global data retention policy details. +func (c *Client4) GetDataRetentionPolicy() (*GlobalRetentionPolicy, *Response, error) { + r, err := c.DoAPIGet(c.dataRetentionRoute()+"/policy", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var p GlobalRetentionPolicy + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("GetDataRetentionPolicy", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +// GetDataRetentionPolicyByID will get the details for the granular data retention policy with the specified ID. +func (c *Client4) GetDataRetentionPolicyByID(policyID string) (*RetentionPolicyWithTeamAndChannelCounts, *Response, error) { + r, err := c.DoAPIGet(c.dataRetentionPolicyRoute(policyID), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var p RetentionPolicyWithTeamAndChannelCounts + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("GetDataRetentionPolicyByID", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +// GetDataRetentionPoliciesCount will get the total number of granular data retention policies. +func (c *Client4) GetDataRetentionPoliciesCount() (int64, *Response, error) { + type CountBody struct { + TotalCount int64 `json:"total_count"` + } + r, err := c.DoAPIGet(c.dataRetentionRoute()+"/policies_count", "") + if err != nil { + return 0, BuildResponse(r), err + } + var countObj CountBody + err = json.NewDecoder(r.Body).Decode(&countObj) + if err != nil { + return 0, nil, NewAppError("Client4.GetDataRetentionPoliciesCount", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode) + } + return countObj.TotalCount, BuildResponse(r), nil +} + +// GetDataRetentionPolicies will get the current granular data retention policies' details. 
+func (c *Client4) GetDataRetentionPolicies(page, perPage int) (*RetentionPolicyWithTeamAndChannelCountsList, *Response, error) { + query := fmt.Sprintf("?page=%d&per_page=%d", page, perPage) + r, err := c.DoAPIGet(c.dataRetentionRoute()+"/policies"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var p RetentionPolicyWithTeamAndChannelCountsList + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("GetDataRetentionPolicies", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +// CreateDataRetentionPolicy will create a new granular data retention policy which will be applied to +// the specified teams and channels. The Id field of `policy` must be empty. +func (c *Client4) CreateDataRetentionPolicy(policy *RetentionPolicyWithTeamAndChannelIDs) (*RetentionPolicyWithTeamAndChannelCounts, *Response, error) { + policyJSON, jsonErr := json.Marshal(policy) + if jsonErr != nil { + return nil, nil, NewAppError("CreateDataRetentionPolicy", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.dataRetentionRoute()+"/policies", policyJSON) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var p RetentionPolicyWithTeamAndChannelCounts + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("CreateDataRetentionPolicy", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +// DeleteDataRetentionPolicy will delete the granular data retention policy with the specified ID. +func (c *Client4) DeleteDataRetentionPolicy(policyID string) (*Response, error) { + r, err := c.DoAPIDelete(c.dataRetentionPolicyRoute(policyID)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// PatchDataRetentionPolicy will patch the granular data retention policy with the specified ID. +// The Id field of `patch` must be non-empty. +func (c *Client4) PatchDataRetentionPolicy(patch *RetentionPolicyWithTeamAndChannelIDs) (*RetentionPolicyWithTeamAndChannelCounts, *Response, error) { + patchJSON, jsonErr := json.Marshal(patch) + if jsonErr != nil { + return nil, nil, NewAppError("PatchDataRetentionPolicy", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPatchBytes(c.dataRetentionPolicyRoute(patch.ID), patchJSON) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var p RetentionPolicyWithTeamAndChannelCounts + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("PatchDataRetentionPolicy", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +// GetTeamsForRetentionPolicy will get the teams to which the specified policy is currently applied. 
+func (c *Client4) GetTeamsForRetentionPolicy(policyID string, page, perPage int) (*TeamsWithCount, *Response, error) { + query := fmt.Sprintf("?page=%d&per_page=%d", page, perPage) + r, err := c.DoAPIGet(c.dataRetentionPolicyRoute(policyID)+"/teams"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + var teams *TeamsWithCount + err = json.NewDecoder(r.Body).Decode(&teams) + if err != nil { + return nil, BuildResponse(r), NewAppError("Client4.GetTeamsForRetentionPolicy", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode) + } + return teams, BuildResponse(r), nil +} + +// SearchTeamsForRetentionPolicy will search the teams to which the specified policy is currently applied. +func (c *Client4) SearchTeamsForRetentionPolicy(policyID string, term string) ([]*Team, *Response, error) { + body, _ := json.Marshal(map[string]interface{}{"term": term}) + r, err := c.DoAPIPostBytes(c.dataRetentionPolicyRoute(policyID)+"/teams/search", body) + if err != nil { + return nil, BuildResponse(r), err + } + var teams []*Team + err = json.NewDecoder(r.Body).Decode(&teams) + if err != nil { + return nil, BuildResponse(r), NewAppError("Client4.SearchTeamsForRetentionPolicy", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode) + } + return teams, BuildResponse(r), nil +} + +// AddTeamsToRetentionPolicy will add the specified teams to the granular data retention policy +// with the specified ID. +func (c *Client4) AddTeamsToRetentionPolicy(policyID string, teamIDs []string) (*Response, error) { + body, _ := json.Marshal(teamIDs) + r, err := c.DoAPIPostBytes(c.dataRetentionPolicyRoute(policyID)+"/teams", body) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// RemoveTeamsFromRetentionPolicy will remove the specified teams from the granular data retention policy +// with the specified ID. +func (c *Client4) RemoveTeamsFromRetentionPolicy(policyID string, teamIDs []string) (*Response, error) { + body, _ := json.Marshal(teamIDs) + r, err := c.DoAPIDeleteBytes(c.dataRetentionPolicyRoute(policyID)+"/teams", body) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetChannelsForRetentionPolicy will get the channels to which the specified policy is currently applied. +func (c *Client4) GetChannelsForRetentionPolicy(policyID string, page, perPage int) (*ChannelsWithCount, *Response, error) { + query := fmt.Sprintf("?page=%d&per_page=%d", page, perPage) + r, err := c.DoAPIGet(c.dataRetentionPolicyRoute(policyID)+"/channels"+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + var channels *ChannelsWithCount + err = json.NewDecoder(r.Body).Decode(&channels) + if err != nil { + return nil, BuildResponse(r), NewAppError("Client4.GetChannelsForRetentionPolicy", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode) + } + return channels, BuildResponse(r), nil +} + +// SearchChannelsForRetentionPolicy will search the channels to which the specified policy is currently applied. 
+func (c *Client4) SearchChannelsForRetentionPolicy(policyID string, term string) (ChannelListWithTeamData, *Response, error) {
+	body, _ := json.Marshal(map[string]interface{}{"term": term})
+	r, err := c.DoAPIPostBytes(c.dataRetentionPolicyRoute(policyID)+"/channels/search", body)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	var channels ChannelListWithTeamData
+	err = json.NewDecoder(r.Body).Decode(&channels)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.SearchChannelsForRetentionPolicy", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return channels, BuildResponse(r), nil
+}
+
+// AddChannelsToRetentionPolicy will add the specified channels to the granular data retention policy
+// with the specified ID.
+func (c *Client4) AddChannelsToRetentionPolicy(policyID string, channelIDs []string) (*Response, error) {
+	body, _ := json.Marshal(channelIDs)
+	r, err := c.DoAPIPostBytes(c.dataRetentionPolicyRoute(policyID)+"/channels", body)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// RemoveChannelsFromRetentionPolicy will remove the specified channels from the granular data retention policy
+// with the specified ID.
+func (c *Client4) RemoveChannelsFromRetentionPolicy(policyID string, channelIDs []string) (*Response, error) {
+	body, _ := json.Marshal(channelIDs)
+	r, err := c.DoAPIDeleteBytes(c.dataRetentionPolicyRoute(policyID)+"/channels", body)
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetTeamPoliciesForUser will get the data retention policies for the teams to which a user belongs.
+func (c *Client4) GetTeamPoliciesForUser(userID string, offset, limit int) (*RetentionPolicyForTeamList, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userID)+"/data_retention/team_policies", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	var teams RetentionPolicyForTeamList
+	err = json.NewDecoder(r.Body).Decode(&teams)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.GetTeamPoliciesForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return &teams, BuildResponse(r), nil
+}
+
+// GetChannelPoliciesForUser will get the data retention policies for the channels to which a user belongs.
+func (c *Client4) GetChannelPoliciesForUser(userID string, offset, limit int) (*RetentionPolicyForChannelList, *Response, error) {
+	r, err := c.DoAPIGet(c.userRoute(userID)+"/data_retention/channel_policies", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	var channels RetentionPolicyForChannelList
+	err = json.NewDecoder(r.Body).Decode(&channels)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.GetChannelPoliciesForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return &channels, BuildResponse(r), nil
+}
+
+// Commands Section
+
+// CreateCommand will create a new command if the user has the right permissions.
+func (c *Client4) CreateCommand(cmd *Command) (*Command, *Response, error) { + buf, err := json.Marshal(cmd) + if err != nil { + return nil, nil, NewAppError("CreateCommand", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.commandsRoute(), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var command Command + if jsonErr := json.NewDecoder(r.Body).Decode(&command); jsonErr != nil { + return nil, nil, NewAppError("CreateCommand", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &command, BuildResponse(r), nil +} + +// UpdateCommand updates a command based on the provided Command struct. +func (c *Client4) UpdateCommand(cmd *Command) (*Command, *Response, error) { + buf, err := json.Marshal(cmd) + if err != nil { + return nil, nil, NewAppError("UpdateCommand", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.commandRoute(cmd.Id), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var command Command + if jsonErr := json.NewDecoder(r.Body).Decode(&command); jsonErr != nil { + return nil, nil, NewAppError("UpdateCommand", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &command, BuildResponse(r), nil +} + +// MoveCommand moves a command to a different team. +func (c *Client4) MoveCommand(teamId string, commandId string) (*Response, error) { + cmr := CommandMoveRequest{TeamId: teamId} + buf, err := json.Marshal(cmr) + if err != nil { + return nil, NewAppError("MoveCommand", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.commandMoveRoute(commandId), buf) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// DeleteCommand deletes a command based on the provided command id string. +func (c *Client4) DeleteCommand(commandId string) (*Response, error) { + r, err := c.DoAPIDelete(c.commandRoute(commandId)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// ListCommands will retrieve a list of commands available in the team. +func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Response, error) { + query := fmt.Sprintf("?team_id=%v&custom_only=%v", teamId, customOnly) + r, err := c.DoAPIGet(c.commandsRoute()+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var list []*Command + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("ListCommands", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// ListCommandAutocompleteSuggestions will retrieve a list of suggestions for a userInput. 
+func (c *Client4) ListCommandAutocompleteSuggestions(userInput, teamId string) ([]AutocompleteSuggestion, *Response, error) { + query := fmt.Sprintf("/commands/autocomplete_suggestions?user_input=%v", userInput) + r, err := c.DoAPIGet(c.teamRoute(teamId)+query, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []AutocompleteSuggestion + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("ListCommandAutocompleteSuggestions", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// GetCommandById will retrieve a command by id. +func (c *Client4) GetCommandById(cmdId string) (*Command, *Response, error) { + url := fmt.Sprintf("%s/%s", c.commandsRoute(), cmdId) + r, err := c.DoAPIGet(url, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var command Command + if jsonErr := json.NewDecoder(r.Body).Decode(&command); jsonErr != nil { + return nil, nil, NewAppError("GetCommandById", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &command, BuildResponse(r), nil +} + +// ExecuteCommand executes a given slash command. +func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response, error) { + commandArgs := &CommandArgs{ + ChannelId: channelId, + Command: command, + } + buf, err := json.Marshal(commandArgs) + if err != nil { + return nil, nil, NewAppError("ExecuteCommand", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.commandsRoute()+"/execute", buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + response, err := CommandResponseFromJSON(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("ExecuteCommand", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return response, BuildResponse(r), nil +} + +// ExecuteCommandWithTeam executes a given slash command against the specified team. +// Use this when executing slash commands in a DM/GM, since the team id cannot be inferred in that case. +func (c *Client4) ExecuteCommandWithTeam(channelId, teamId, command string) (*CommandResponse, *Response, error) { + commandArgs := &CommandArgs{ + ChannelId: channelId, + TeamId: teamId, + Command: command, + } + buf, err := json.Marshal(commandArgs) + if err != nil { + return nil, nil, NewAppError("ExecuteCommandWithTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.commandsRoute()+"/execute", buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + response, err := CommandResponseFromJSON(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("ExecuteCommandWithTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return response, BuildResponse(r), nil +} + +// ListAutocompleteCommands will retrieve a list of commands available in the team. 
+func (c *Client4) ListAutocompleteCommands(teamId string) ([]*Command, *Response, error) {
+	r, err := c.DoAPIGet(c.teamAutoCompleteCommandsRoute(teamId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Command
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("ListAutocompleteCommands", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// RegenCommandToken will create a new token if the user has the right permissions.
+func (c *Client4) RegenCommandToken(commandId string) (string, *Response, error) {
+	r, err := c.DoAPIPut(c.commandRoute(commandId)+"/regen_token", "")
+	if err != nil {
+		return "", BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body)["token"], BuildResponse(r), nil
+}
+
+// Status Section
+
+// GetUserStatus returns a user's status based on the provided user id string.
+func (c *Client4) GetUserStatus(userId, etag string) (*Status, *Response, error) {
+	r, err := c.DoAPIGet(c.userStatusRoute(userId), etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s Status
+	if r.StatusCode == http.StatusNotModified {
+		return &s, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("GetUserStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
+
+// GetUsersStatusesByIds returns a list of user statuses based on the provided user ids.
+func (c *Client4) GetUsersStatusesByIds(userIds []string) ([]*Status, *Response, error) {
+	r, err := c.DoAPIPost(c.userStatusesRoute()+"/ids", ArrayToJSON(userIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Status
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersStatusesByIds", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// UpdateUserStatus sets a user's status based on the provided user id string.
+func (c *Client4) UpdateUserStatus(userId string, userStatus *Status) (*Status, *Response, error) {
+	buf, err := json.Marshal(userStatus)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateUserStatus", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.userStatusRoute(userId), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s Status
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateUserStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
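A minimal sketch of the status helpers above, assuming the same authenticated client; userID is a hypothetical 26-character Mattermost id.

	// Mark a user as away; the returned Status echoes the stored state.
	status, _, err := client.UpdateUserStatus(userID, &model.Status{UserId: userID, Status: "away"})
	if err != nil {
		log.Fatalf("update status: %v", err)
	}
	fmt.Println("status is now", status.Status)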
+// UpdateUserCustomStatus sets a user's custom status based on the provided user id string.
+func (c *Client4) UpdateUserCustomStatus(userId string, userCustomStatus *CustomStatus) (*CustomStatus, *Response, error) {
+	buf, err := json.Marshal(userCustomStatus)
+	if err != nil {
+		return nil, nil, NewAppError("UpdateUserCustomStatus", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.userStatusRoute(userId)+"/custom", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s CustomStatus
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("UpdateUserCustomStatus", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
+
+// RemoveUserCustomStatus removes a user's custom status based on the provided user id string.
+func (c *Client4) RemoveUserCustomStatus(userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userStatusRoute(userId) + "/custom")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// RemoveRecentUserCustomStatus removes a user's recent custom status based on the provided user id string.
+func (c *Client4) RemoveRecentUserCustomStatus(userId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userStatusRoute(userId) + "/custom/recent")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// Emoji Section
+
+// CreateEmoji will save an emoji to the server if the current user has permission
+// to do so. If successful, the provided emoji will be returned with its Id field
+// filled in. Otherwise, an error will be returned.
+func (c *Client4) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *Response, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+
+	part, err := writer.CreateFormFile("image", filename)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err := io.Copy(part, bytes.NewBuffer(image)); err != nil {
+		return nil, nil, err
+	}
+
+	emojiJSON, jsonErr := json.Marshal(emoji)
+	if jsonErr != nil {
+		return nil, nil, NewAppError("CreateEmoji", "api.marshal_error", nil, jsonErr.Error(), 0)
+	}
+
+	if err := writer.WriteField("emoji", string(emojiJSON)); err != nil {
+		return nil, nil, err
+	}
+
+	if err := writer.Close(); err != nil {
+		return nil, nil, err
+	}
+
+	return c.DoEmojiUploadFile(c.emojisRoute(), body.Bytes(), writer.FormDataContentType())
+}
+
+// GetEmojiList returns a page of custom emoji on the system.
+func (c *Client4) GetEmojiList(page, perPage int) ([]*Emoji, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.emojisRoute()+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list []*Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetEmojiList", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
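A minimal sketch of CreateEmoji with the same client, assuming imageBytes already holds PNG data read from disk; userID and the emoji name are hypothetical.

	emoji := &model.Emoji{CreatorId: userID, Name: "party_parrot"}
	created, _, err := client.CreateEmoji(emoji, imageBytes, "party_parrot.png")
	if err != nil {
		log.Fatalf("create emoji: %v", err)
	}
	fmt.Println("new emoji id:", created.Id) // Id is filled in by the server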
+// GetSortedEmojiList returns a page of custom emoji on the system sorted based on the sort
+// parameter, blank for no sorting and "name" to sort by emoji names.
+func (c *Client4) GetSortedEmojiList(page, perPage int, sort string) ([]*Emoji, *Response, error) {
+	query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v", page, perPage, sort)
+	r, err := c.DoAPIGet(c.emojisRoute()+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetSortedEmojiList", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// DeleteEmoji deletes a custom emoji based on the provided emoji id string.
+func (c *Client4) DeleteEmoji(emojiId string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.emojiRoute(emojiId))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetEmoji returns a custom emoji based on the emojiId string.
+func (c *Client4) GetEmoji(emojiId string) (*Emoji, *Response, error) {
+	r, err := c.DoAPIGet(c.emojiRoute(emojiId), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var e Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&e); jsonErr != nil {
+		return nil, nil, NewAppError("GetEmoji", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &e, BuildResponse(r), nil
+}
+
+// GetEmojiByName returns a custom emoji based on the name string.
+func (c *Client4) GetEmojiByName(name string) (*Emoji, *Response, error) {
+	r, err := c.DoAPIGet(c.emojiByNameRoute(name), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var e Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&e); jsonErr != nil {
+		return nil, nil, NewAppError("GetEmojiByName", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &e, BuildResponse(r), nil
+}
+
+// GetEmojiImage returns the emoji image.
+func (c *Client4) GetEmojiImage(emojiId string) ([]byte, *Response, error) {
+	r, err := c.DoAPIGet(c.emojiRoute(emojiId)+"/image", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	data, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetEmojiImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)
+	}
+
+	return data, BuildResponse(r), nil
+}
+
+// SearchEmoji returns a list of emoji matching some search criteria.
+func (c *Client4) SearchEmoji(search *EmojiSearch) ([]*Emoji, *Response, error) {
+	buf, err := json.Marshal(search)
+	if err != nil {
+		return nil, nil, NewAppError("SearchEmoji", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.emojisRoute()+"/search", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("SearchEmoji", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
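A minimal sketch of SearchEmoji with the same client; the search term is arbitrary.

	matches, _, err := client.SearchEmoji(&model.EmojiSearch{Term: "parrot"})
	if err != nil {
		log.Fatalf("search emoji: %v", err)
	}
	for _, e := range matches {
		fmt.Println(e.Name)
	}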
+// AutocompleteEmoji returns a list of emoji starting with or matching name.
+func (c *Client4) AutocompleteEmoji(name string, etag string) ([]*Emoji, *Response, error) {
+	query := fmt.Sprintf("?name=%v", name)
+	r, err := c.DoAPIGet(c.emojisRoute()+"/autocomplete"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Emoji
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("AutocompleteEmoji", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// Reaction Section
+
+// SaveReaction saves an emoji reaction for a post. Returns the saved reaction if successful, otherwise an error will be returned.
+func (c *Client4) SaveReaction(reaction *Reaction) (*Reaction, *Response, error) {
+	buf, err := json.Marshal(reaction)
+	if err != nil {
+		return nil, nil, NewAppError("SaveReaction", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.reactionsRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var re Reaction
+	if jsonErr := json.NewDecoder(r.Body).Decode(&re); jsonErr != nil {
+		return nil, nil, NewAppError("SaveReaction", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &re, BuildResponse(r), nil
+}
+
+// GetReactions returns a list of reactions to a post.
+func (c *Client4) GetReactions(postId string) ([]*Reaction, *Response, error) {
+	r, err := c.DoAPIGet(c.postRoute(postId)+"/reactions", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Reaction
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetReactions", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
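A minimal sketch of SaveReaction, assuming hypothetical userID and postID values and the same client.

	saved, _, err := client.SaveReaction(&model.Reaction{UserId: userID, PostId: postID, EmojiName: "+1"})
	if err != nil {
		log.Fatalf("save reaction: %v", err)
	}
	fmt.Println("reacted at", saved.CreateAt) // CreateAt is set server-side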
+// DeleteReaction deletes a user's reaction to a post.
+func (c *Client4) DeleteReaction(reaction *Reaction) (*Response, error) {
+	r, err := c.DoAPIDelete(c.userRoute(reaction.UserId) + c.postRoute(reaction.PostId) + fmt.Sprintf("/reactions/%v", reaction.EmojiName))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetBulkReactions returns a map of postIds and corresponding reactions
+func (c *Client4) GetBulkReactions(postIds []string) (map[string][]*Reaction, *Response, error) {
+	r, err := c.DoAPIPost(c.postsRoute()+"/ids/reactions", ArrayToJSON(postIds))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	reactions := map[string][]*Reaction{}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&reactions); jsonErr != nil {
+		return nil, nil, NewAppError("GetBulkReactions", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return reactions, BuildResponse(r), nil
+}
+
+func (c *Client4) GetTopReactionsForTeamSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
+	query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)
+	r, err := c.DoAPIGet(c.teamRoute(teamId)+"/top/reactions"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var topReactions *TopReactionList
+	if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
+		return nil, nil, NewAppError("GetTopReactionsForTeamSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return topReactions, BuildResponse(r), nil
+}
+
+func (c *Client4) GetTopReactionsForUserSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
+	query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)
+
+	if teamId != "" {
+		query += fmt.Sprintf("&team_id=%v", teamId)
+	}
+
+	r, err := c.DoAPIGet(c.usersRoute()+"/me/top/reactions"+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var topReactions *TopReactionList
+	if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
+		return nil, nil, NewAppError("GetTopReactionsForUserSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return topReactions, BuildResponse(r), nil
+}
+
+// Timezone Section
+
+// GetSupportedTimezone returns a page of supported timezones on the system.
+func (c *Client4) GetSupportedTimezone() ([]string, *Response, error) {
+	r, err := c.DoAPIGet(c.timezonesRoute(), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var timezones []string
+	json.NewDecoder(r.Body).Decode(&timezones)
+	return timezones, BuildResponse(r), nil
+}
+
+// Open Graph Metadata Section
+
+// OpenGraph returns the open graph metadata for a particular url if the site has the metadata.
+func (c *Client4) OpenGraph(url string) (map[string]string, *Response, error) {
+	requestBody := make(map[string]string)
+	requestBody["url"] = url
+
+	r, err := c.DoAPIPost(c.openGraphRoute(), MapToJSON(requestBody))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return MapFromJSON(r.Body), BuildResponse(r), nil
+}
+
+// Jobs Section
+
+// GetJob gets a single job.
+func (c *Client4) GetJob(id string) (*Job, *Response, error) { + r, err := c.DoAPIGet(c.jobsRoute()+fmt.Sprintf("/%v", id), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var j Job + if jsonErr := json.NewDecoder(r.Body).Decode(&j); jsonErr != nil { + return nil, nil, NewAppError("GetJob", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &j, BuildResponse(r), nil +} + +// GetJobs gets all jobs, sorted with the job that was created most recently first. +func (c *Client4) GetJobs(page int, perPage int) ([]*Job, *Response, error) { + r, err := c.DoAPIGet(c.jobsRoute()+fmt.Sprintf("?page=%v&per_page=%v", page, perPage), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*Job + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetJobs", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// GetJobsByType gets all jobs of a given type, sorted with the job that was created most recently first. +func (c *Client4) GetJobsByType(jobType string, page int, perPage int) ([]*Job, *Response, error) { + r, err := c.DoAPIGet(c.jobsRoute()+fmt.Sprintf("/type/%v?page=%v&per_page=%v", jobType, page, perPage), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*Job + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetJobsByType", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// CreateJob creates a job based on the provided job struct. +func (c *Client4) CreateJob(job *Job) (*Job, *Response, error) { + buf, err := json.Marshal(job) + if err != nil { + return nil, nil, NewAppError("CreateJob", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.jobsRoute(), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var j Job + if jsonErr := json.NewDecoder(r.Body).Decode(&j); jsonErr != nil { + return nil, nil, NewAppError("CreateJob", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &j, BuildResponse(r), nil +} + +// CancelJob requests the cancellation of the job with the provided Id. +func (c *Client4) CancelJob(jobId string) (*Response, error) { + r, err := c.DoAPIPost(c.jobsRoute()+fmt.Sprintf("/%v/cancel", jobId), "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// DownloadJob downloads the results of the job +func (c *Client4) DownloadJob(jobId string) ([]byte, *Response, error) { + r, err := c.DoAPIGet(c.jobsRoute()+fmt.Sprintf("/%v/download", jobId), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetFile", "model.client.read_job_result_file.app_error", nil, err.Error(), r.StatusCode) + } + return data, BuildResponse(r), nil +} + +// Roles Section + +// GetAllRoles returns a list of all the roles. 
+func (c *Client4) GetAllRoles() ([]*Role, *Response, error) { + r, err := c.DoAPIGet(c.rolesRoute(), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*Role + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetAllRoles", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// GetRole gets a single role by ID. +func (c *Client4) GetRole(id string) (*Role, *Response, error) { + r, err := c.DoAPIGet(c.rolesRoute()+fmt.Sprintf("/%v", id), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var role Role + if jsonErr := json.NewDecoder(r.Body).Decode(&role); jsonErr != nil { + return nil, nil, NewAppError("GetRole", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &role, BuildResponse(r), nil +} + +// GetRoleByName gets a single role by Name. +func (c *Client4) GetRoleByName(name string) (*Role, *Response, error) { + r, err := c.DoAPIGet(c.rolesRoute()+fmt.Sprintf("/name/%v", name), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var role Role + if jsonErr := json.NewDecoder(r.Body).Decode(&role); jsonErr != nil { + return nil, nil, NewAppError("GetRoleByName", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &role, BuildResponse(r), nil +} + +// GetRolesByNames returns a list of roles based on the provided role names. +func (c *Client4) GetRolesByNames(roleNames []string) ([]*Role, *Response, error) { + r, err := c.DoAPIPost(c.rolesRoute()+"/names", ArrayToJSON(roleNames)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*Role + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetRolesByNames", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// PatchRole partially updates a role in the system. Any missing fields are not updated. +func (c *Client4) PatchRole(roleId string, patch *RolePatch) (*Role, *Response, error) { + buf, err := json.Marshal(patch) + if err != nil { + return nil, nil, NewAppError("PatchRole", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.rolesRoute()+fmt.Sprintf("/%v/patch", roleId), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var role Role + if jsonErr := json.NewDecoder(r.Body).Decode(&role); jsonErr != nil { + return nil, nil, NewAppError("PatchRole", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &role, BuildResponse(r), nil +} + +// Schemes Section + +// CreateScheme creates a new Scheme. 
+func (c *Client4) CreateScheme(scheme *Scheme) (*Scheme, *Response, error) {
+	buf, err := json.Marshal(scheme)
+	if err != nil {
+		return nil, nil, NewAppError("CreateScheme", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPostBytes(c.schemesRoute(), buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s Scheme
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("CreateScheme", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
+
+// GetScheme gets a single scheme by ID.
+func (c *Client4) GetScheme(id string) (*Scheme, *Response, error) {
+	r, err := c.DoAPIGet(c.schemeRoute(id), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s Scheme
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("GetScheme", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
+
+// GetSchemes gets all schemes, sorted with the most recently created first, optionally filtered by scope.
+func (c *Client4) GetSchemes(scope string, page int, perPage int) ([]*Scheme, *Response, error) {
+	r, err := c.DoAPIGet(c.schemesRoute()+fmt.Sprintf("?scope=%v&page=%v&per_page=%v", scope, page, perPage), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Scheme
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetSchemes", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// DeleteScheme deletes a single scheme by ID.
+func (c *Client4) DeleteScheme(id string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.schemeRoute(id))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// PatchScheme partially updates a scheme in the system. Any missing fields are not updated.
+func (c *Client4) PatchScheme(id string, patch *SchemePatch) (*Scheme, *Response, error) {
+	buf, err := json.Marshal(patch)
+	if err != nil {
+		return nil, nil, NewAppError("PatchScheme", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPutBytes(c.schemeRoute(id)+"/patch", buf)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var s Scheme
+	if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil {
+		return nil, nil, NewAppError("PatchScheme", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &s, BuildResponse(r), nil
+}
+
+// GetTeamsForScheme gets the teams using this scheme, sorted alphabetically by display name.
+func (c *Client4) GetTeamsForScheme(schemeId string, page int, perPage int) ([]*Team, *Response, error) {
+	r, err := c.DoAPIGet(c.schemeRoute(schemeId)+fmt.Sprintf("/teams?page=%v&per_page=%v", page, perPage), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*Team
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetTeamsForScheme", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
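A minimal sketch of the scheme helpers above with the same client; the names are hypothetical, and "channel" is one of the valid scheme scopes.

	scheme, _, err := client.CreateScheme(&model.Scheme{Name: "read-only", DisplayName: "Read Only", Scope: "channel"})
	if err != nil {
		log.Fatalf("create scheme: %v", err)
	}
	newName := "read-only-v2"
	// Only the provided fields are updated by a patch.
	if _, _, err := client.PatchScheme(scheme.Id, &model.SchemePatch{Name: &newName}); err != nil {
		log.Fatalf("patch scheme: %v", err)
	}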
+// GetChannelsForScheme gets the channels using this scheme, sorted alphabetically by display name.
+func (c *Client4) GetChannelsForScheme(schemeId string, page int, perPage int) (ChannelList, *Response, error) {
+	r, err := c.DoAPIGet(c.schemeRoute(schemeId)+fmt.Sprintf("/channels?page=%v&per_page=%v", page, perPage), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var ch ChannelList
+	err = json.NewDecoder(r.Body).Decode(&ch)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("GetChannelsForScheme", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	return ch, BuildResponse(r), nil
+}
+
+// Plugin Section
+
+// UploadPlugin takes an io.Reader stream pointing to the contents of a .tar.gz plugin.
+func (c *Client4) UploadPlugin(file io.Reader) (*Manifest, *Response, error) {
+	return c.uploadPlugin(file, false)
+}
+
+func (c *Client4) UploadPluginForced(file io.Reader) (*Manifest, *Response, error) {
+	return c.uploadPlugin(file, true)
+}
+
+func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response, error) {
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+
+	if force {
+		err := writer.WriteField("force", c.boolString(true))
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	part, err := writer.CreateFormFile("plugin", "plugin.tar.gz")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err = io.Copy(part, file); err != nil {
+		return nil, nil, err
+	}
+
+	if err = writer.Close(); err != nil {
+		return nil, nil, err
+	}
+
+	rq, err := http.NewRequest("POST", c.APIURL+c.pluginsRoute(), body)
+	if err != nil {
+		return nil, nil, err
+	}
+	rq.Header.Set("Content-Type", writer.FormDataContentType())
+
+	if c.AuthToken != "" {
+		rq.Header.Set(HeaderAuth, c.AuthType+" "+c.AuthToken)
+	}
+
+	rp, err := c.HTTPClient.Do(rq)
+	if err != nil {
+		return nil, BuildResponse(rp), err
+	}
+	defer closeBody(rp)
+
+	if rp.StatusCode >= 300 {
+		return nil, BuildResponse(rp), AppErrorFromJSON(rp.Body)
+	}
+
+	var m Manifest
+	if jsonErr := json.NewDecoder(rp.Body).Decode(&m); jsonErr != nil {
+		return nil, nil, NewAppError("uploadPlugin", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &m, BuildResponse(rp), nil
+}
+
+func (c *Client4) InstallPluginFromURL(downloadURL string, force bool) (*Manifest, *Response, error) {
+	forceStr := c.boolString(force)
+
+	url := fmt.Sprintf("%s?plugin_download_url=%s&force=%s", c.pluginsRoute()+"/install_from_url", url.QueryEscape(downloadURL), forceStr)
+	r, err := c.DoAPIPost(url, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var m Manifest
+	if jsonErr := json.NewDecoder(r.Body).Decode(&m); jsonErr != nil {
+		return nil, nil, NewAppError("InstallPluginFromUrl", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &m, BuildResponse(r), nil
+}
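A minimal sketch of the plugin upload path above; the bundle filename is hypothetical, and UploadPluginForced overwrites an existing install of the same plugin.

	f, err := os.Open("com.example.demo-0.1.0.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	manifest, _, err := client.UploadPluginForced(f)
	if err != nil {
		log.Fatalf("upload plugin: %v", err)
	}
	fmt.Println("installed", manifest.Id, manifest.Version)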
+// InstallMarketplacePlugin will install a marketplace plugin.
+func (c *Client4) InstallMarketplacePlugin(request *InstallMarketplacePluginRequest) (*Manifest, *Response, error) {
+	buf, err := json.Marshal(request)
+	if err != nil {
+		return nil, nil, NewAppError("InstallMarketplacePlugin", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+	}
+	r, err := c.DoAPIPost(c.pluginsRoute()+"/marketplace", string(buf))
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var m Manifest
+	if jsonErr := json.NewDecoder(r.Body).Decode(&m); jsonErr != nil {
+		return nil, nil, NewAppError("InstallMarketplacePlugin", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &m, BuildResponse(r), nil
+}
+
+// GetPlugins will return a list of plugin manifests for currently active plugins.
+func (c *Client4) GetPlugins() (*PluginsResponse, *Response, error) {
+	r, err := c.DoAPIGet(c.pluginsRoute(), "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var resp PluginsResponse
+	if jsonErr := json.NewDecoder(r.Body).Decode(&resp); jsonErr != nil {
+		return nil, nil, NewAppError("GetPlugins", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &resp, BuildResponse(r), nil
+}
+
+// GetPluginStatuses will return the plugins installed on any server in the cluster, for reporting
+// to the administrator via the system console.
+func (c *Client4) GetPluginStatuses() (PluginStatuses, *Response, error) {
+	r, err := c.DoAPIGet(c.pluginsRoute()+"/statuses", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list PluginStatuses
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetPluginStatuses", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// RemovePlugin will disable and delete a plugin.
+func (c *Client4) RemovePlugin(id string) (*Response, error) {
+	r, err := c.DoAPIDelete(c.pluginRoute(id))
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// GetWebappPlugins will return a list of plugins that the webapp should download.
+func (c *Client4) GetWebappPlugins() ([]*Manifest, *Response, error) {
+	r, err := c.DoAPIGet(c.pluginsRoute()+"/webapp", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var list []*Manifest
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetWebappPlugins", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+// EnablePlugin will enable an installed plugin.
+func (c *Client4) EnablePlugin(id string) (*Response, error) {
+	r, err := c.DoAPIPost(c.pluginRoute(id)+"/enable", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
+
+// DisablePlugin will disable an enabled plugin.
+func (c *Client4) DisablePlugin(id string) (*Response, error) {
+	r, err := c.DoAPIPost(c.pluginRoute(id)+"/disable", "")
+	if err != nil {
+		return BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return BuildResponse(r), nil
+}
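A minimal sketch of toggling a plugin by id (the id is hypothetical); both calls return only a *Response and an error.

	if _, err := client.DisablePlugin("com.example.demo"); err != nil {
		log.Fatalf("disable plugin: %v", err)
	}
	if _, err := client.EnablePlugin("com.example.demo"); err != nil {
		log.Fatalf("enable plugin: %v", err)
	}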
+func (c *Client4) GetMarketplacePlugins(filter *MarketplacePluginFilter) ([]*MarketplacePlugin, *Response, error) { + route := c.pluginsRoute() + "/marketplace" + u, err := url.Parse(route) + if err != nil { + return nil, nil, err + } + + filter.ApplyToURL(u) + + r, err := c.DoAPIGet(u.String(), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + plugins, err := MarketplacePluginsFromReader(r.Body) + if err != nil { + return nil, BuildResponse(r), NewAppError(route, "model.client.parse_plugins.app_error", nil, err.Error(), http.StatusBadRequest) + } + + return plugins, BuildResponse(r), nil +} + +// UpdateChannelScheme will update a channel's scheme. +func (c *Client4) UpdateChannelScheme(channelId, schemeId string) (*Response, error) { + sip := &SchemeIDPatch{SchemeID: &schemeId} + buf, err := json.Marshal(sip) + if err != nil { + return nil, NewAppError("UpdateChannelScheme", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.channelSchemeRoute(channelId), buf) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// UpdateTeamScheme will update a team's scheme. +func (c *Client4) UpdateTeamScheme(teamId, schemeId string) (*Response, error) { + sip := &SchemeIDPatch{SchemeID: &schemeId} + buf, err := json.Marshal(sip) + if err != nil { + return nil, NewAppError("UpdateTeamScheme", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.teamSchemeRoute(teamId), buf) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetRedirectLocation retrieves the value of the 'Location' header of an HTTP response for a given URL. +func (c *Client4) GetRedirectLocation(urlParam, etag string) (string, *Response, error) { + url := fmt.Sprintf("%s?url=%s", c.redirectLocationRoute(), url.QueryEscape(urlParam)) + r, err := c.DoAPIGet(url, etag) + if err != nil { + return "", BuildResponse(r), err + } + defer closeBody(r) + return MapFromJSON(r.Body)["location"], BuildResponse(r), nil +} + +// SetServerBusy will mark the server as busy, which disables non-critical services for `secs` seconds. +func (c *Client4) SetServerBusy(secs int) (*Response, error) { + url := fmt.Sprintf("%s?seconds=%d", c.serverBusyRoute(), secs) + r, err := c.DoAPIPost(url, "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// ClearServerBusy will mark the server as not busy. +func (c *Client4) ClearServerBusy() (*Response, error) { + r, err := c.DoAPIDelete(c.serverBusyRoute()) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetServerBusy returns the current ServerBusyState including the time when a server marked busy +// will automatically have the flag cleared. +func (c *Client4) GetServerBusy() (*ServerBusyState, *Response, error) { + r, err := c.DoAPIGet(c.serverBusyRoute(), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var sbs ServerBusyState + if jsonErr := json.NewDecoder(r.Body).Decode(&sbs); jsonErr != nil { + return nil, nil, NewAppError("GetServerBusy", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &sbs, BuildResponse(r), nil +} + +// RegisterTermsOfServiceAction saves action performed by a user against a specific terms of service. 
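+//
+// A minimal usage sketch; both IDs are hypothetical:
+//
+//	resp, err := client.RegisterTermsOfServiceAction("someUserId", "someTermsOfServiceId", true)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = resp.StatusCode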
+func (c *Client4) RegisterTermsOfServiceAction(userId, termsOfServiceId string, accepted bool) (*Response, error) { + url := c.userTermsOfServiceRoute(userId) + data := map[string]interface{}{"termsOfServiceId": termsOfServiceId, "accepted": accepted} + r, err := c.DoAPIPost(url, StringInterfaceToJSON(data)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetTermsOfService fetches the latest terms of service +func (c *Client4) GetTermsOfService(etag string) (*TermsOfService, *Response, error) { + url := c.termsOfServiceRoute() + r, err := c.DoAPIGet(url, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var tos TermsOfService + if jsonErr := json.NewDecoder(r.Body).Decode(&tos); jsonErr != nil { + return nil, nil, NewAppError("GetTermsOfService", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &tos, BuildResponse(r), nil +} + +// GetUserTermsOfService fetches user's latest terms of service action if the latest action was for acceptance. +func (c *Client4) GetUserTermsOfService(userId, etag string) (*UserTermsOfService, *Response, error) { + url := c.userTermsOfServiceRoute(userId) + r, err := c.DoAPIGet(url, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var u UserTermsOfService + if jsonErr := json.NewDecoder(r.Body).Decode(&u); jsonErr != nil { + return nil, nil, NewAppError("GetUserTermsOfService", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &u, BuildResponse(r), nil +} + +// CreateTermsOfService creates new terms of service. +func (c *Client4) CreateTermsOfService(text, userId string) (*TermsOfService, *Response, error) { + url := c.termsOfServiceRoute() + data := map[string]interface{}{"text": text} + r, err := c.DoAPIPost(url, StringInterfaceToJSON(data)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var tos TermsOfService + if jsonErr := json.NewDecoder(r.Body).Decode(&tos); jsonErr != nil { + return nil, nil, NewAppError("CreateTermsOfService", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &tos, BuildResponse(r), nil +} + +func (c *Client4) GetGroup(groupID, etag string) (*Group, *Response, error) { + r, err := c.DoAPIGet(c.groupRoute(groupID), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var g Group + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("GetGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &g, BuildResponse(r), nil +} + +func (c *Client4) CreateGroup(group *Group) (*Group, *Response, error) { + groupJSON, jsonErr := json.Marshal(group) + if jsonErr != nil { + return nil, nil, NewAppError("CreateGroup", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes("/groups", groupJSON) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var p Group + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("CreateGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +func (c *Client4) DeleteGroup(groupID string) (*Group, *Response, error) { + r, err := c.DoAPIDelete(c.groupRoute(groupID)) + if err != nil { + return nil, BuildResponse(r), 
err + } + defer closeBody(r) + var p Group + if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil { + return nil, nil, NewAppError("DeleteGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &p, BuildResponse(r), nil +} + +func (c *Client4) PatchGroup(groupID string, patch *GroupPatch) (*Group, *Response, error) { + payload, _ := json.Marshal(patch) + r, err := c.DoAPIPut(c.groupRoute(groupID)+"/patch", string(payload)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var g Group + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("PatchGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &g, BuildResponse(r), nil +} + +func (c *Client4) UpsertGroupMembers(groupID string, userIds *GroupModifyMembers) ([]*GroupMember, *Response, error) { + payload, jsonErr := json.Marshal(userIds) + if jsonErr != nil { + return nil, nil, NewAppError("UpsertGroupMembers", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.groupRoute(groupID)+"/members", payload) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var g []*GroupMember + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("UpsertGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return g, BuildResponse(r), nil +} + +func (c *Client4) DeleteGroupMembers(groupID string, userIds *GroupModifyMembers) ([]*GroupMember, *Response, error) { + payload, jsonErr := json.Marshal(userIds) + if jsonErr != nil { + return nil, nil, NewAppError("DeleteGroupMembers", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIDeleteBytes(c.groupRoute(groupID)+"/members", payload) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var g []*GroupMember + if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil { + return nil, nil, NewAppError("DeleteGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return g, BuildResponse(r), nil +} + +func (c *Client4) LinkGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, patch *GroupSyncablePatch) (*GroupSyncable, *Response, error) { + payload, _ := json.Marshal(patch) + url := fmt.Sprintf("%s/link", c.groupSyncableRoute(groupID, syncableID, syncableType)) + r, err := c.DoAPIPost(url, string(payload)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var gs GroupSyncable + if jsonErr := json.NewDecoder(r.Body).Decode(&gs); jsonErr != nil { + return nil, nil, NewAppError("LinkGroupSyncable", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &gs, BuildResponse(r), nil +} + +func (c *Client4) UnlinkGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType) (*Response, error) { + url := fmt.Sprintf("%s/link", c.groupSyncableRoute(groupID, syncableID, syncableType)) + r, err := c.DoAPIDelete(url) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +func (c *Client4) GetGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, etag string) (*GroupSyncable, *Response, error) { + r, err := c.DoAPIGet(c.groupSyncableRoute(groupID, syncableID, syncableType), etag) + if err != nil 
{ + return nil, BuildResponse(r), err + } + defer closeBody(r) + var gs GroupSyncable + if jsonErr := json.NewDecoder(r.Body).Decode(&gs); jsonErr != nil { + return nil, nil, NewAppError("GetGroupSyncable", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &gs, BuildResponse(r), nil +} + +func (c *Client4) GetGroupSyncables(groupID string, syncableType GroupSyncableType, etag string) ([]*GroupSyncable, *Response, error) { + r, err := c.DoAPIGet(c.groupSyncablesRoute(groupID, syncableType), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*GroupSyncable + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetGroupSyncables", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +func (c *Client4) PatchGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, patch *GroupSyncablePatch) (*GroupSyncable, *Response, error) { + payload, _ := json.Marshal(patch) + r, err := c.DoAPIPut(c.groupSyncableRoute(groupID, syncableID, syncableType)+"/patch", string(payload)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var gs GroupSyncable + if jsonErr := json.NewDecoder(r.Body).Decode(&gs); jsonErr != nil { + return nil, nil, NewAppError("PatchGroupSyncable", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &gs, BuildResponse(r), nil +} + +func (c *Client4) TeamMembersMinusGroupMembers(teamID string, groupIDs []string, page, perPage int, etag string) ([]*UserWithGroups, int64, *Response, error) { + groupIDStr := strings.Join(groupIDs, ",") + query := fmt.Sprintf("?group_ids=%s&page=%d&per_page=%d", groupIDStr, page, perPage) + r, err := c.DoAPIGet(c.teamRoute(teamID)+"/members_minus_group_members"+query, etag) + if err != nil { + return nil, 0, BuildResponse(r), err + } + defer closeBody(r) + + var ugc UsersWithGroupsAndCount + if jsonErr := json.NewDecoder(r.Body).Decode(&ugc); jsonErr != nil { + return nil, 0, nil, NewAppError("TeamMembersMinusGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return ugc.Users, ugc.Count, BuildResponse(r), nil +} + +func (c *Client4) ChannelMembersMinusGroupMembers(channelID string, groupIDs []string, page, perPage int, etag string) ([]*UserWithGroups, int64, *Response, error) { + groupIDStr := strings.Join(groupIDs, ",") + query := fmt.Sprintf("?group_ids=%s&page=%d&per_page=%d", groupIDStr, page, perPage) + r, err := c.DoAPIGet(c.channelRoute(channelID)+"/members_minus_group_members"+query, etag) + if err != nil { + return nil, 0, BuildResponse(r), err + } + defer closeBody(r) + var ugc UsersWithGroupsAndCount + if jsonErr := json.NewDecoder(r.Body).Decode(&ugc); jsonErr != nil { + return nil, 0, nil, NewAppError("ChannelMembersMinusGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return ugc.Users, ugc.Count, BuildResponse(r), nil +} + +func (c *Client4) PatchConfig(config *Config) (*Config, *Response, error) { + buf, err := json.Marshal(config) + if err != nil { + return nil, nil, NewAppError("PatchConfig", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPutBytes(c.configRoute()+"/patch", buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + return ConfigFromJSON(r.Body), BuildResponse(r), nil +} + 
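+// GetChannelModerations returns the moderation settings for a channel. A
+// minimal usage sketch; the channel ID is hypothetical and the empty string
+// skips ETag caching:
+//
+//	moderations, _, err := client.GetChannelModerations("someChannelId", "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, m := range moderations {
+//		_ = m.Name
+//	}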
+func (c *Client4) GetChannelModerations(channelID string, etag string) ([]*ChannelModeration, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelID)+"/moderations", etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch []*ChannelModeration + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelModerations", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +func (c *Client4) PatchChannelModerations(channelID string, patch []*ChannelModerationPatch) ([]*ChannelModeration, *Response, error) { + payload, err := json.Marshal(patch) + if err != nil { + return nil, nil, NewAppError("PatchChannelModerations", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + + r, err := c.DoAPIPut(c.channelRoute(channelID)+"/moderations/patch", string(payload)) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch []*ChannelModeration + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("PatchChannelModerations", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +func (c *Client4) GetKnownUsers() ([]string, *Response, error) { + r, err := c.DoAPIGet(c.usersRoute()+"/known", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var userIds []string + json.NewDecoder(r.Body).Decode(&userIds) + return userIds, BuildResponse(r), nil +} + +// PublishUserTyping publishes a user is typing websocket event based on the provided TypingRequest. +func (c *Client4) PublishUserTyping(userID string, typingRequest TypingRequest) (*Response, error) { + buf, err := json.Marshal(typingRequest) + if err != nil { + return nil, NewAppError("PublishUserTyping", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.publishUserTypingRoute(userID), buf) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +func (c *Client4) GetChannelMemberCountsByGroup(channelID string, includeTimezones bool, etag string) ([]*ChannelMemberCountByGroup, *Response, error) { + r, err := c.DoAPIGet(c.channelRoute(channelID)+"/member_counts_by_group?include_timezones="+strconv.FormatBool(includeTimezones), etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var ch []*ChannelMemberCountByGroup + err = json.NewDecoder(r.Body).Decode(&ch) + if err != nil { + return nil, BuildResponse(r), NewAppError("GetChannelMemberCountsByGroup", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return ch, BuildResponse(r), nil +} + +// RequestTrialLicense will request a trial license and install it in the server +func (c *Client4) RequestTrialLicense(users int) (*Response, error) { + b, _ := json.Marshal(map[string]interface{}{"users": users, "terms_accepted": true}) + r, err := c.DoAPIPost("/trial-license", string(b)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// GetGroupStats retrieves stats for a Mattermost Group +func (c *Client4) GetGroupStats(groupID string) (*GroupStats, *Response, error) { + r, err := c.DoAPIGet(c.groupRoute(groupID)+"/stats", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var gs 
GroupStats
+	if jsonErr := json.NewDecoder(r.Body).Decode(&gs); jsonErr != nil {
+		return nil, nil, NewAppError("GetGroupStats", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return &gs, BuildResponse(r), nil
+}
+
+func (c *Client4) GetSidebarCategoriesForTeamForUser(userID, teamID, etag string) (*OrderedSidebarCategories, *Response, error) {
+	route := c.userCategoryRoute(userID, teamID)
+	r, err := c.DoAPIGet(route, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var cat *OrderedSidebarCategories
+	err = json.NewDecoder(r.Body).Decode(&cat)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.GetSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return cat, BuildResponse(r), nil
+}
+
+func (c *Client4) CreateSidebarCategoryForTeamForUser(userID, teamID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response, error) {
+	payload, _ := json.Marshal(category)
+	route := c.userCategoryRoute(userID, teamID)
+	r, err := c.DoAPIPostBytes(route, payload)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var cat *SidebarCategoryWithChannels
+	err = json.NewDecoder(r.Body).Decode(&cat)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.CreateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+	return cat, BuildResponse(r), nil
+}
+
+func (c *Client4) UpdateSidebarCategoriesForTeamForUser(userID, teamID string, categories []*SidebarCategoryWithChannels) ([]*SidebarCategoryWithChannels, *Response, error) {
+	payload, _ := json.Marshal(categories)
+	route := c.userCategoryRoute(userID, teamID)
+
+	r, err := c.DoAPIPutBytes(route, payload)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	var cat []*SidebarCategoryWithChannels
+	err = json.NewDecoder(r.Body).Decode(&cat)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)
+	}
+
+	return cat, BuildResponse(r), nil
+}
+
+func (c *Client4) GetSidebarCategoryOrderForTeamForUser(userID, teamID, etag string) ([]string, *Response, error) {
+	route := c.userCategoryRoute(userID, teamID) + "/order"
+	r, err := c.DoAPIGet(route, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return ArrayFromJSON(r.Body), BuildResponse(r), nil
+}
+
+func (c *Client4) UpdateSidebarCategoryOrderForTeamForUser(userID, teamID string, order []string) ([]string, *Response, error) {
+	payload, _ := json.Marshal(order)
+	route := c.userCategoryRoute(userID, teamID) + "/order"
+	r, err := c.DoAPIPutBytes(route, payload)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	return ArrayFromJSON(r.Body), BuildResponse(r), nil
+}
+
+func (c *Client4) GetSidebarCategoryForTeamForUser(userID, teamID, categoryID, etag string) (*SidebarCategoryWithChannels, *Response, error) {
+	route := c.userCategoryRoute(userID, teamID) + "/" + categoryID
+	r, err := c.DoAPIGet(route, etag)
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var cat *SidebarCategoryWithChannels
+	err = json.NewDecoder(r.Body).Decode(&cat)
+	if err != nil {
+		return nil, BuildResponse(r), NewAppError("Client4.GetSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(),
r.StatusCode) + } + + return cat, BuildResponse(r), nil +} + +func (c *Client4) UpdateSidebarCategoryForTeamForUser(userID, teamID, categoryID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response, error) { + payload, _ := json.Marshal(category) + route := c.userCategoryRoute(userID, teamID) + "/" + categoryID + r, err := c.DoAPIPutBytes(route, payload) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var cat *SidebarCategoryWithChannels + err = json.NewDecoder(r.Body).Decode(&cat) + if err != nil { + return nil, BuildResponse(r), NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode) + } + + return cat, BuildResponse(r), nil +} + +// CheckIntegrity performs a database integrity check. +func (c *Client4) CheckIntegrity() ([]IntegrityCheckResult, *Response, error) { + r, err := c.DoAPIPost("/integrity", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var results []IntegrityCheckResult + if err := json.NewDecoder(r.Body).Decode(&results); err != nil { + return nil, BuildResponse(r), NewAppError("Api4.CheckIntegrity", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + return results, BuildResponse(r), nil +} + +func (c *Client4) GetNotices(lastViewed int64, teamId string, client NoticeClientType, clientVersion, locale, etag string) (NoticeMessages, *Response, error) { + url := fmt.Sprintf("/system/notices/%s?lastViewed=%d&client=%s&clientVersion=%s&locale=%s", teamId, lastViewed, client, clientVersion, locale) + r, err := c.DoAPIGet(url, etag) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + notices, err := UnmarshalProductNoticeMessages(r.Body) + if err != nil { + return nil, BuildResponse(r), err + } + return notices, BuildResponse(r), nil +} + +func (c *Client4) MarkNoticesViewed(ids []string) (*Response, error) { + r, err := c.DoAPIPut("/system/notices/view", ArrayToJSON(ids)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +func (c *Client4) CompleteOnboarding(request *CompleteOnboardingRequest) (*Response, error) { + buf, err := json.Marshal(request) + if err != nil { + return nil, NewAppError("CompleteOnboarding", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPost(c.systemRoute()+"/onboarding/complete", string(buf)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + + return BuildResponse(r), nil +} + +// CreateUpload creates a new upload session. +func (c *Client4) CreateUpload(us *UploadSession) (*UploadSession, *Response, error) { + buf, err := json.Marshal(us) + if err != nil { + return nil, nil, NewAppError("CreateUpload", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + } + r, err := c.DoAPIPostBytes(c.uploadsRoute(), buf) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var s UploadSession + if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil { + return nil, nil, NewAppError("CreateUpload", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &s, BuildResponse(r), nil +} + +// GetUpload returns the upload session for the specified uploadId. 
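+//
+// A minimal sketch of the full upload flow, pairing CreateUpload, GetUpload,
+// and UploadData; the channel ID, file name, and data value are hypothetical,
+// and error handling is elided after the first call:
+//
+//	us, _, err := client.CreateUpload(&UploadSession{
+//		ChannelId: "someChannelId",
+//		Filename:  "notes.txt",
+//		FileSize:  int64(len(data)),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	us, _, err = client.GetUpload(us.Id)
+//	fi, _, err := client.UploadData(us.Id, bytes.NewReader(data))
+//	_ = fi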
+func (c *Client4) GetUpload(uploadId string) (*UploadSession, *Response, error) { + r, err := c.DoAPIGet(c.uploadRoute(uploadId), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var s UploadSession + if jsonErr := json.NewDecoder(r.Body).Decode(&s); jsonErr != nil { + return nil, nil, NewAppError("GetUpload", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &s, BuildResponse(r), nil +} + +// GetUploadsForUser returns the upload sessions created by the specified +// userId. +func (c *Client4) GetUploadsForUser(userId string) ([]*UploadSession, *Response, error) { + r, err := c.DoAPIGet(c.userRoute(userId)+"/uploads", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var list []*UploadSession + if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil { + return nil, nil, NewAppError("GetUploadsForUser", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return list, BuildResponse(r), nil +} + +// UploadData performs an upload. On success it returns +// a FileInfo object. +func (c *Client4) UploadData(uploadId string, data io.Reader) (*FileInfo, *Response, error) { + url := c.uploadRoute(uploadId) + r, err := c.DoAPIRequestReader("POST", c.APIURL+url, data, nil) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var fi FileInfo + if r.StatusCode == http.StatusNoContent { + return nil, BuildResponse(r), nil + } + if jsonErr := json.NewDecoder(r.Body).Decode(&fi); jsonErr != nil { + return nil, nil, NewAppError("UploadData", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) + } + return &fi, BuildResponse(r), nil +} + +func (c *Client4) UpdatePassword(userId, currentPassword, newPassword string) (*Response, error) { + requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword} + r, err := c.DoAPIPut(c.userRoute(userId)+"/password", MapToJSON(requestBody)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +// Cloud Section + +func (c *Client4) GetCloudProducts() ([]*Product, *Response, error) { + r, err := c.DoAPIGet(c.cloudRoute()+"/products", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var cloudProducts []*Product + json.NewDecoder(r.Body).Decode(&cloudProducts) + + return cloudProducts, BuildResponse(r), nil +} + +func (c *Client4) CreateCustomerPayment() (*StripeSetupIntent, *Response, error) { + r, err := c.DoAPIPost(c.cloudRoute()+"/payment", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var setupIntent *StripeSetupIntent + json.NewDecoder(r.Body).Decode(&setupIntent) + + return setupIntent, BuildResponse(r), nil +} + +func (c *Client4) ConfirmCustomerPayment(confirmRequest *ConfirmPaymentMethodRequest) (*Response, error) { + json, _ := json.Marshal(confirmRequest) + + r, err := c.DoAPIPostBytes(c.cloudRoute()+"/payment/confirm", json) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + + return BuildResponse(r), nil +} + +func (c *Client4) GetCloudCustomer() (*CloudCustomer, *Response, error) { + r, err := c.DoAPIGet(c.cloudRoute()+"/customer", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var cloudCustomer *CloudCustomer + json.NewDecoder(r.Body).Decode(&cloudCustomer) + + return cloudCustomer, BuildResponse(r), nil +} + +func (c 
*Client4) GetSubscription() (*Subscription, *Response, error) { + r, err := c.DoAPIGet(c.cloudRoute()+"/subscription", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var subscription *Subscription + json.NewDecoder(r.Body).Decode(&subscription) + + return subscription, BuildResponse(r), nil +} + +func (c *Client4) GetInvoicesForSubscription() ([]*Invoice, *Response, error) { + r, err := c.DoAPIGet(c.cloudRoute()+"/subscription/invoices", "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var invoices []*Invoice + json.NewDecoder(r.Body).Decode(&invoices) + + return invoices, BuildResponse(r), nil +} + +func (c *Client4) UpdateCloudCustomer(customerInfo *CloudCustomerInfo) (*CloudCustomer, *Response, error) { + customerBytes, _ := json.Marshal(customerInfo) + + r, err := c.DoAPIPutBytes(c.cloudRoute()+"/customer", customerBytes) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var customer *CloudCustomer + json.NewDecoder(r.Body).Decode(&customer) + + return customer, BuildResponse(r), nil +} + +func (c *Client4) UpdateCloudCustomerAddress(address *Address) (*CloudCustomer, *Response, error) { + addressBytes, _ := json.Marshal(address) + + r, err := c.DoAPIPutBytes(c.cloudRoute()+"/customer/address", addressBytes) + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var customer *CloudCustomer + json.NewDecoder(r.Body).Decode(&customer) + + return customer, BuildResponse(r), nil +} + +func (c *Client4) ListImports() ([]string, *Response, error) { + r, err := c.DoAPIGet(c.importsRoute(), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + return ArrayFromJSON(r.Body), BuildResponse(r), nil +} + +func (c *Client4) ListExports() ([]string, *Response, error) { + r, err := c.DoAPIGet(c.exportsRoute(), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + return ArrayFromJSON(r.Body), BuildResponse(r), nil +} + +func (c *Client4) DeleteExport(name string) (*Response, error) { + r, err := c.DoAPIDelete(c.exportRoute(name)) + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + return BuildResponse(r), nil +} + +func (c *Client4) DownloadExport(name string, wr io.Writer, offset int64) (int64, *Response, error) { + var headers map[string]string + if offset > 0 { + headers = map[string]string{ + HeaderRange: fmt.Sprintf("bytes=%d-", offset), + } + } + r, err := c.DoAPIRequestWithHeaders(http.MethodGet, c.APIURL+c.exportRoute(name), "", headers) + if err != nil { + return 0, BuildResponse(r), err + } + defer closeBody(r) + n, err := io.Copy(wr, r.Body) + if err != nil { + return n, BuildResponse(r), NewAppError("DownloadExport", "model.client.copy.app_error", nil, err.Error(), r.StatusCode) + } + return n, BuildResponse(r), nil +} + +func (c *Client4) GetUserThreads(userId, teamId string, options GetUserThreadsOpts) (*Threads, *Response, error) { + v := url.Values{} + if options.Since != 0 { + v.Set("since", fmt.Sprintf("%d", options.Since)) + } + if options.Before != "" { + v.Set("before", options.Before) + } + if options.After != "" { + v.Set("after", options.After) + } + if options.PageSize != 0 { + v.Set("per_page", fmt.Sprintf("%d", options.PageSize)) + } + if options.Extended { + v.Set("extended", "true") + } + if options.Deleted { + v.Set("deleted", "true") + } + if options.Unread { + v.Set("unread", "true") + } + if options.ThreadsOnly { + v.Set("threadsOnly", "true") + } 
+ if options.TotalsOnly { + v.Set("totalsOnly", "true") + } + url := c.userThreadsRoute(userId, teamId) + if len(v) > 0 { + url += "?" + v.Encode() + } + + r, err := c.DoAPIGet(url, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var threads Threads + json.NewDecoder(r.Body).Decode(&threads) + + return &threads, BuildResponse(r), nil +} + +func (c *Client4) GetUserThread(userId, teamId, threadId string, extended bool) (*ThreadResponse, *Response, error) { + url := c.userThreadRoute(userId, teamId, threadId) + if extended { + url += "?extended=true" + } + r, err := c.DoAPIGet(url, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var thread ThreadResponse + json.NewDecoder(r.Body).Decode(&thread) + + return &thread, BuildResponse(r), nil +} + +func (c *Client4) UpdateThreadsReadForUser(userId, teamId string) (*Response, error) { + r, err := c.DoAPIPut(fmt.Sprintf("%s/read", c.userThreadsRoute(userId, teamId)), "") + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + + return BuildResponse(r), nil +} + +func (c *Client4) SetThreadUnreadByPostId(userId, teamId, threadId, postId string) (*ThreadResponse, *Response, error) { + r, err := c.DoAPIPost(fmt.Sprintf("%s/set_unread/%s", c.userThreadRoute(userId, teamId, threadId), postId), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var thread ThreadResponse + json.NewDecoder(r.Body).Decode(&thread) + + return &thread, BuildResponse(r), nil +} + +func (c *Client4) UpdateThreadReadForUser(userId, teamId, threadId string, timestamp int64) (*ThreadResponse, *Response, error) { + r, err := c.DoAPIPut(fmt.Sprintf("%s/read/%d", c.userThreadRoute(userId, teamId, threadId), timestamp), "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + var thread ThreadResponse + json.NewDecoder(r.Body).Decode(&thread) + + return &thread, BuildResponse(r), nil +} + +func (c *Client4) UpdateThreadFollowForUser(userId, teamId, threadId string, state bool) (*Response, error) { + var err error + var r *http.Response + if state { + r, err = c.DoAPIPut(c.userThreadRoute(userId, teamId, threadId)+"/following", "") + } else { + r, err = c.DoAPIDelete(c.userThreadRoute(userId, teamId, threadId) + "/following") + } + if err != nil { + return BuildResponse(r), err + } + defer closeBody(r) + + return BuildResponse(r), nil +} + +func (c *Client4) GetAllSharedChannels(teamID string, page, perPage int) ([]*SharedChannel, *Response, error) { + url := fmt.Sprintf("%s/%s?page=%d&per_page=%d", c.sharedChannelsRoute(), teamID, page, perPage) + r, err := c.DoAPIGet(url, "") + if err != nil { + return nil, BuildResponse(r), err + } + defer closeBody(r) + + var channels []*SharedChannel + json.NewDecoder(r.Body).Decode(&channels) + + return channels, BuildResponse(r), nil +} + +func (c *Client4) GetRemoteClusterInfo(remoteID string) (RemoteClusterInfo, *Response, error) { + url := fmt.Sprintf("%s/remote_info/%s", c.sharedChannelsRoute(), remoteID) + r, err := c.DoAPIGet(url, "") + if err != nil { + return RemoteClusterInfo{}, BuildResponse(r), err + } + defer closeBody(r) + + var rci RemoteClusterInfo + json.NewDecoder(r.Body).Decode(&rci) + + return rci, BuildResponse(r), nil +} + +func (c *Client4) GetAncillaryPermissions(subsectionPermissions []string) ([]string, *Response, error) { + var returnedPermissions []string + url := fmt.Sprintf("%s/ancillary?subsection_permissions=%s", c.permissionsRoute(), 
strings.Join(subsectionPermissions, ","))
+	r, err := c.DoAPIGet(url, "")
+	if err != nil {
+		return returnedPermissions, BuildResponse(r), err
+	}
+	defer closeBody(r)
+
+	json.NewDecoder(r.Body).Decode(&returnedPermissions)
+	return returnedPermissions, BuildResponse(r), nil
+}
+
+func (c *Client4) GetUsersWithInvalidEmails(page, perPage int) ([]*User, *Response, error) {
+	query := fmt.Sprintf("/invalid_emails?page=%v&per_page=%v", page, perPage)
+	r, err := c.DoAPIGet(c.usersRoute()+query, "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []*User
+	if r.StatusCode == http.StatusNotModified {
+		return list, BuildResponse(r), nil
+	}
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetUsersWithInvalidEmails", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
+
+func (c *Client4) GetAppliedSchemaMigrations() ([]AppliedMigration, *Response, error) {
+	r, err := c.DoAPIGet(c.systemRoute()+"/schema/version", "")
+	if err != nil {
+		return nil, BuildResponse(r), err
+	}
+	defer closeBody(r)
+	var list []AppliedMigration
+	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+		return nil, nil, NewAppError("GetAppliedSchemaMigrations", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+	}
+	return list, BuildResponse(r), nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go
new file mode 100644
index 00000000..7bab4d86
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import "strings"
+
+const (
+	EventTypeFailedPayment         = "failed-payment"
+	EventTypeFailedPaymentNoCard   = "failed-payment-no-card"
+	EventTypeSendAdminWelcomeEmail = "send-admin-welcome-email"
+	EventTypeTrialWillEnd          = "trial-will-end"
+	EventTypeTrialEnded            = "trial-ended"
+)
+
+var MockCWS string
+
+type BillingScheme string
+
+const (
+	BillingSchemePerSeat    = BillingScheme("per_seat")
+	BillingSchemeFlatFee    = BillingScheme("flat_fee")
+	BillingSchemeSalesServe = BillingScheme("sales_serve")
+)
+
+type RecurringInterval string
+
+const (
+	RecurringIntervalYearly  = RecurringInterval("year")
+	RecurringIntervalMonthly = RecurringInterval("month")
+)
+
+type SubscriptionFamily string
+
+const (
+	SubscriptionFamilyCloud  = SubscriptionFamily("cloud")
+	SubscriptionFamilyOnPrem = SubscriptionFamily("on-prem")
+)
+
+// Product model represents a product on the cloud system.
+type Product struct {
+	ID                string             `json:"id"`
+	Name              string             `json:"name"`
+	Description       string             `json:"description"`
+	PricePerSeat      float64            `json:"price_per_seat"`
+	AddOns            []*AddOn           `json:"add_ons"`
+	SKU               string             `json:"sku"`
+	PriceID           string             `json:"price_id"`
+	Family            SubscriptionFamily `json:"product_family"`
+	RecurringInterval RecurringInterval  `json:"recurring_interval"`
+	BillingScheme     BillingScheme      `json:"billing_scheme"`
+}
+
+// AddOn represents an addon to a product.
+type AddOn struct {
+	ID           string  `json:"id"`
+	Name         string  `json:"name"`
+	DisplayName  string  `json:"display_name"`
+	PricePerSeat float64 `json:"price_per_seat"`
+}
+
+// StripeSetupIntent represents the SetupIntent model from Stripe for updating payment methods.
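+//
+// The ClientSecret is what a browser-side Stripe integration would typically
+// use to confirm the intent; the ID is what gets echoed back to the server in
+// a ConfirmPaymentMethodRequest.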
+type StripeSetupIntent struct {
+	ID           string `json:"id"`
+	ClientSecret string `json:"client_secret"`
+}
+
+// ConfirmPaymentMethodRequest contains the fields for the customer payment update API.
+type ConfirmPaymentMethodRequest struct {
+	StripeSetupIntentID string `json:"stripe_setup_intent_id"`
+	SubscriptionID      string `json:"subscription_id"`
+}
+
+// CloudCustomer model represents a customer on the system.
+type CloudCustomer struct {
+	CloudCustomerInfo
+	ID             string         `json:"id"`
+	CreatorID      string         `json:"creator_id"`
+	CreateAt       int64          `json:"create_at"`
+	BillingAddress *Address       `json:"billing_address"`
+	CompanyAddress *Address       `json:"company_address"`
+	PaymentMethod  *PaymentMethod `json:"payment_method"`
+}
+
+// CloudCustomerInfo represents editable info of a customer.
+type CloudCustomerInfo struct {
+	Name             string `json:"name"`
+	Email            string `json:"email,omitempty"`
+	ContactFirstName string `json:"contact_first_name,omitempty"`
+	ContactLastName  string `json:"contact_last_name,omitempty"`
+	NumEmployees     int    `json:"num_employees"`
+}
+
+// Address model represents a customer's address.
+type Address struct {
+	City       string `json:"city"`
+	Country    string `json:"country"`
+	Line1      string `json:"line1"`
+	Line2      string `json:"line2"`
+	PostalCode string `json:"postal_code"`
+	State      string `json:"state"`
+}
+
+// PaymentMethod represents methods of payment for a customer.
+type PaymentMethod struct {
+	Type      string `json:"type"`
+	LastFour  string `json:"last_four"`
+	ExpMonth  int    `json:"exp_month"`
+	ExpYear   int    `json:"exp_year"`
+	CardBrand string `json:"card_brand"`
+	Name      string `json:"name"`
+}
+
+// Subscription model represents a subscription on the system.
+type Subscription struct {
+	ID          string   `json:"id"`
+	CustomerID  string   `json:"customer_id"`
+	ProductID   string   `json:"product_id"`
+	AddOns      []string `json:"add_ons"`
+	StartAt     int64    `json:"start_at"`
+	EndAt       int64    `json:"end_at"`
+	CreateAt    int64    `json:"create_at"`
+	Seats       int      `json:"seats"`
+	Status      string   `json:"status"`
+	DNS         string   `json:"dns"`
+	IsPaidTier  string   `json:"is_paid_tier"`
+	LastInvoice *Invoice `json:"last_invoice"`
+	IsFreeTrial string   `json:"is_free_trial"`
+	TrialEndAt  int64    `json:"trial_end_at"`
+}
+
+// GetWorkSpaceNameFromDNS returns the workspace name. For example, for test.mattermost.cloud.com it returns test.
+func (s *Subscription) GetWorkSpaceNameFromDNS() string {
+	return strings.Split(s.DNS, ".")[0]
+}
+
+// Invoice model represents a cloud invoice.
+type Invoice struct {
+	ID                 string             `json:"id"`
+	Number             string             `json:"number"`
+	CreateAt           int64              `json:"create_at"`
+	Total              int64              `json:"total"`
+	Tax                int64              `json:"tax"`
+	Status             string             `json:"status"`
+	Description        string             `json:"description"`
+	PeriodStart        int64              `json:"period_start"`
+	PeriodEnd          int64              `json:"period_end"`
+	SubscriptionID     string             `json:"subscription_id"`
+	Items              []*InvoiceLineItem `json:"line_items"`
+	CurrentProductName string             `json:"current_product_name"`
+}
+
+// InvoiceLineItem model represents a cloud invoice line item tied to an invoice.
+type InvoiceLineItem struct { + PriceID string `json:"price_id"` + Total int64 `json:"total"` + Quantity float64 `json:"quantity"` + PricePerUnit int64 `json:"price_per_unit"` + Description string `json:"description"` + Type string `json:"type"` + Metadata map[string]interface{} `json:"metadata"` +} + +type CWSWebhookPayload struct { + Event string `json:"event"` + FailedPayment *FailedPayment `json:"failed_payment"` + CloudWorkspaceOwner *CloudWorkspaceOwner `json:"cloud_workspace_owner"` + SubscriptionTrialEndUnixTimeStamp int64 `json:"trial_end_time_stamp"` +} + +type FailedPayment struct { + CardBrand string `json:"card_brand"` + LastFour string `json:"last_four"` + FailureMessage string `json:"failure_message"` +} + +// CloudWorkspaceOwner is part of the CWS Webhook payload that contains information about the user that created the workspace from the CWS +type CloudWorkspaceOwner struct { + UserName string `json:"username"` +} +type SubscriptionChange struct { + ProductID string `json:"product_id"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_discovery.go similarity index 76% rename from vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/cluster_discovery.go index f6c9275a..160a5917 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_discovery.go @@ -4,15 +4,13 @@ package model import ( - "encoding/json" - "io" "net/http" "os" ) const ( - CDS_OFFLINE_AFTER_MILLIS = 1000 * 60 * 30 // 30 minutes - CDS_TYPE_APP = "mattermost_app" + CDSOfflineAfterMillis = 1000 * 60 * 30 // 30 minutes + CDSTypeApp = "mattermost_app" ) type ClusterDiscovery struct { @@ -39,20 +37,20 @@ func (o *ClusterDiscovery) PreSave() { func (o *ClusterDiscovery) AutoFillHostname() { // attempt to set the hostname from the OS - if len(o.Hostname) == 0 { + if o.Hostname == "" { if hn, err := os.Hostname(); err == nil { o.Hostname = hn } } } -func (o *ClusterDiscovery) AutoFillIpAddress(iface string, ipAddress string) { +func (o *ClusterDiscovery) AutoFillIPAddress(iface string, ipAddress string) { // attempt to set the hostname to the first non-local IP address - if len(o.Hostname) == 0 { - if len(ipAddress) > 0 { + if o.Hostname == "" { + if ipAddress != "" { o.Hostname = ipAddress } else { - o.Hostname = GetServerIpAddress(iface) + o.Hostname = GetServerIPAddress(iface) } } } @@ -93,15 +91,15 @@ func (o *ClusterDiscovery) IsValid() *AppError { return NewAppError("ClusterDiscovery.IsValid", "model.cluster.is_valid.id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ClusterName) == 0 { + if o.ClusterName == "" { return NewAppError("ClusterDiscovery.IsValid", "model.cluster.is_valid.name.app_error", nil, "", http.StatusBadRequest) } - if len(o.Type) == 0 { + if o.Type == "" { return NewAppError("ClusterDiscovery.IsValid", "model.cluster.is_valid.type.app_error", nil, "", http.StatusBadRequest) } - if len(o.Hostname) == 0 { + if o.Hostname == "" { return NewAppError("ClusterDiscovery.IsValid", "model.cluster.is_valid.hostname.app_error", nil, "", http.StatusBadRequest) } @@ -115,23 +113,3 @@ func (o *ClusterDiscovery) IsValid() *AppError { return nil } - -func (o *ClusterDiscovery) ToJson() string { - b, err := json.Marshal(o) - if err != nil { - return "" - } - - return string(b) -} - -func 
ClusterDiscoveryFromJson(data io.Reader) *ClusterDiscovery { - decoder := json.NewDecoder(data) - var me ClusterDiscovery - err := decoder.Decode(&me) - if err == nil { - return &me - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_info.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_info.go new file mode 100644 index 00000000..48d11d2f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_info.go @@ -0,0 +1,12 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type ClusterInfo struct { + Id string `json:"id"` + Version string `json:"version"` + ConfigHash string `json:"config_hash"` + IPAddress string `json:"ipaddress"` + Hostname string `json:"hostname"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_message.go new file mode 100644 index 00000000..90999ab0 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_message.go @@ -0,0 +1,63 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type ClusterEvent string + +const ( + ClusterEventPublish ClusterEvent = "publish" + ClusterEventUpdateStatus ClusterEvent = "update_status" + ClusterEventInvalidateAllCaches ClusterEvent = "inv_all_caches" + ClusterEventInvalidateCacheForReactions ClusterEvent = "inv_reactions" + ClusterEventInvalidateCacheForChannelMembersNotifyProps ClusterEvent = "inv_channel_members_notify_props" + ClusterEventInvalidateCacheForChannelByName ClusterEvent = "inv_channel_name" + ClusterEventInvalidateCacheForChannel ClusterEvent = "inv_channel" + ClusterEventInvalidateCacheForChannelGuestCount ClusterEvent = "inv_channel_guest_count" + ClusterEventInvalidateCacheForUser ClusterEvent = "inv_user" + ClusterEventInvalidateCacheForUserTeams ClusterEvent = "inv_user_teams" + ClusterEventClearSessionCacheForUser ClusterEvent = "clear_session_user" + ClusterEventInvalidateCacheForRoles ClusterEvent = "inv_roles" + ClusterEventInvalidateCacheForRolePermissions ClusterEvent = "inv_role_permissions" + ClusterEventInvalidateCacheForProfileByIds ClusterEvent = "inv_profile_ids" + ClusterEventInvalidateCacheForProfileInChannel ClusterEvent = "inv_profile_in_channel" + ClusterEventInvalidateCacheForSchemes ClusterEvent = "inv_schemes" + ClusterEventInvalidateCacheForFileInfos ClusterEvent = "inv_file_infos" + ClusterEventInvalidateCacheForWebhooks ClusterEvent = "inv_webhooks" + ClusterEventInvalidateCacheForEmojisById ClusterEvent = "inv_emojis_by_id" + ClusterEventInvalidateCacheForEmojisIdByName ClusterEvent = "inv_emojis_id_by_name" + ClusterEventInvalidateCacheForChannelFileCount ClusterEvent = "inv_channel_file_count" + ClusterEventInvalidateCacheForChannelPinnedpostsCounts ClusterEvent = "inv_channel_pinnedposts_counts" + ClusterEventInvalidateCacheForChannelMemberCounts ClusterEvent = "inv_channel_member_counts" + ClusterEventInvalidateCacheForLastPosts ClusterEvent = "inv_last_posts" + ClusterEventInvalidateCacheForLastPostTime ClusterEvent = "inv_last_post_time" + ClusterEventInvalidateCacheForTeams ClusterEvent = "inv_teams" + ClusterEventClearSessionCacheForAllUsers ClusterEvent = "inv_all_user_sessions" + ClusterEventInstallPlugin ClusterEvent = "install_plugin" + ClusterEventRemovePlugin ClusterEvent = "remove_plugin" 
+ ClusterEventPluginEvent ClusterEvent = "plugin_event" + ClusterEventInvalidateCacheForTermsOfService ClusterEvent = "inv_terms_of_service" + ClusterEventBusyStateChanged ClusterEvent = "busy_state_change" + + // Gossip communication + ClusterGossipEventRequestGetLogs = "gossip_request_get_logs" + ClusterGossipEventResponseGetLogs = "gossip_response_get_logs" + ClusterGossipEventRequestGetClusterStats = "gossip_request_cluster_stats" + ClusterGossipEventResponseGetClusterStats = "gossip_response_cluster_stats" + ClusterGossipEventRequestGetPluginStatuses = "gossip_request_plugin_statuses" + ClusterGossipEventResponseGetPluginStatuses = "gossip_response_plugin_statuses" + ClusterGossipEventRequestSaveConfig = "gossip_request_save_config" + ClusterGossipEventResponseSaveConfig = "gossip_response_save_config" + + // SendTypes for ClusterMessage. + ClusterSendBestEffort = "best_effort" + ClusterSendReliable = "reliable" +) + +type ClusterMessage struct { + Event ClusterEvent `json:"event"` + SendType string `json:"-"` + WaitForAllToSend bool `json:"-"` + Data []byte `json:"data,omitempty"` + Props map[string]string `json:"props,omitempty"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_stats.go similarity index 62% rename from vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/cluster_stats.go index afc2ab44..3b41cb6e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cluster_stats.go @@ -3,25 +3,9 @@ package model -import ( - "encoding/json" - "io" -) - type ClusterStats struct { Id string `json:"id"` TotalWebsocketConnections int `json:"total_websocket_connections"` TotalReadDbConnections int `json:"total_read_db_connections"` TotalMasterDbConnections int `json:"total_master_db_connections"` } - -func (me *ClusterStats) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func ClusterStatsFromJson(data io.Reader) *ClusterStats { - var me *ClusterStats - json.NewDecoder(data).Decode(&me) - return me -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go b/vendor/github.com/mattermost/mattermost-server/v6/model/command.go similarity index 58% rename from vendor/github.com/mattermost/mattermost-server/v5/model/command.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/command.go index 6dcf52ae..4bb95298 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command.go @@ -4,66 +4,44 @@ package model import ( - "encoding/json" - "io" "net/http" "strings" ) const ( - COMMAND_METHOD_POST = "P" - COMMAND_METHOD_GET = "G" - MIN_TRIGGER_LENGTH = 1 - MAX_TRIGGER_LENGTH = 128 + CommandMethodPost = "P" + CommandMethodGet = "G" + MinTriggerLength = 1 + MaxTriggerLength = 128 ) type Command struct { - Id string `json:"id"` - Token string `json:"token"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - CreatorId string `json:"creator_id"` - TeamId string `json:"team_id"` - Trigger string `json:"trigger"` - Method string `json:"method"` - Username string `json:"username"` - IconURL string `json:"icon_url"` - AutoComplete bool `json:"auto_complete"` - AutoCompleteDesc string `json:"auto_complete_desc"` - 
AutoCompleteHint string `json:"auto_complete_hint"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - URL string `json:"url"` + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + TeamId string `json:"team_id"` + Trigger string `json:"trigger"` + Method string `json:"method"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + AutoComplete bool `json:"auto_complete"` + AutoCompleteDesc string `json:"auto_complete_desc"` + AutoCompleteHint string `json:"auto_complete_hint"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + URL string `json:"url"` + // PluginId records the id of the plugin that created this Command. If it is blank, the Command + // was not created by a plugin. + PluginId string `json:"plugin_id"` AutocompleteData *AutocompleteData `db:"-" json:"autocomplete_data,omitempty"` // AutocompleteIconData is a base64 encoded svg AutocompleteIconData string `db:"-" json:"autocomplete_icon_data,omitempty"` } -func (o *Command) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func CommandFromJson(data io.Reader) *Command { - var o *Command - json.NewDecoder(data).Decode(&o) - return o -} - -func CommandListToJson(l []*Command) string { - b, _ := json.Marshal(l) - return string(b) -} - -func CommandListFromJson(data io.Reader) []*Command { - var o []*Command - json.NewDecoder(data).Decode(&o) - return o -} - func (o *Command) IsValid() *AppError { - if !IsValidId(o.Id) { return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -80,27 +58,37 @@ func (o *Command) IsValid() *AppError { return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) } - if !IsValidId(o.CreatorId) { + // If the CreatorId is blank, this should be a command created by a plugin. + if o.CreatorId == "" && !IsValidPluginId(o.PluginId) { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "", http.StatusBadRequest) + } + + // If the PluginId is blank, this should be a command associated with a userId. 
+ if o.PluginId == "" && !IsValidId(o.CreatorId) { return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } + if o.CreatorId != "" && o.PluginId != "" { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "command cannot have both a CreatorId and a PluginId", http.StatusBadRequest) + } + if !IsValidId(o.TeamId) { return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.Trigger) < MIN_TRIGGER_LENGTH || len(o.Trigger) > MAX_TRIGGER_LENGTH || strings.Index(o.Trigger, "/") == 0 || strings.Contains(o.Trigger, " ") { + if len(o.Trigger) < MinTriggerLength || len(o.Trigger) > MaxTriggerLength || strings.Index(o.Trigger, "/") == 0 || strings.Contains(o.Trigger, " ") { return NewAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "", http.StatusBadRequest) } - if len(o.URL) == 0 || len(o.URL) > 1024 { + if o.URL == "" || len(o.URL) > 1024 { return NewAppError("Command.IsValid", "model.command.is_valid.url.app_error", nil, "", http.StatusBadRequest) } - if !IsValidHttpUrl(o.URL) { + if !IsValidHTTPURL(o.URL) { return NewAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "", http.StatusBadRequest) } - if !(o.Method == COMMAND_METHOD_GET || o.Method == COMMAND_METHOD_POST) { + if !(o.Method == CommandMethodGet || o.Method == CommandMethodPost) { return NewAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/v6/model/command_args.go new file mode 100644 index 00000000..c8333acb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command_args.go @@ -0,0 +1,45 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "github.com/mattermost/mattermost-server/v6/shared/i18n" +) + +type CommandArgs struct { + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + RootId string `json:"root_id"` + ParentId string `json:"parent_id"` + TriggerId string `json:"trigger_id,omitempty"` + Command string `json:"command"` + SiteURL string `json:"-"` + T i18n.TranslateFunc `json:"-"` + UserMentions UserMentionMap `json:"-"` + ChannelMentions ChannelMentionMap `json:"-"` + + // DO NOT USE Session field is deprecated. 
MM-26398 + Session Session `json:"-"` +} + +// AddUserMention adds or overrides an entry in UserMentions with name username +// and identifier userId +func (o *CommandArgs) AddUserMention(username, userId string) { + if o.UserMentions == nil { + o.UserMentions = make(UserMentionMap) + } + + o.UserMentions[username] = userId +} + +// AddChannelMention adds or overrides an entry in ChannelMentions with name +// channelName and identifier channelId +func (o *CommandArgs) AddChannelMention(channelName, channelId string) { + if o.ChannelMentions == nil { + o.ChannelMentions = make(ChannelMentionMap) + } + + o.ChannelMentions[channelName] = channelId +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/v6/model/command_autocomplete.go similarity index 88% rename from vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/command_autocomplete.go index 68d91b23..a71a08c6 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command_autocomplete.go @@ -5,7 +5,6 @@ package model import ( "encoding/json" - "io" "net/url" "path" "reflect" @@ -52,7 +51,7 @@ type AutocompleteArg struct { HelpText string // Type of the argument Type AutocompleteArgType - // Required determins if argument is optional or not. + // Required determines if argument is optional or not. Required bool // Actual data of the argument (depends on the Type) Data interface{} @@ -109,7 +108,7 @@ func NewAutocompleteData(trigger, hint, helpText string) *AutocompleteData { Trigger: trigger, Hint: hint, HelpText: helpText, - RoleID: SYSTEM_USER_ROLE_ID, + RoleID: SystemUserRoleId, Arguments: []*AutocompleteArg{}, SubCommands: []*AutocompleteData{}, } @@ -234,7 +233,7 @@ func (ad *AutocompleteData) IsValid() error { if strings.ToLower(ad.Trigger) != ad.Trigger { return errors.New("Command should be lowercase") } - roles := []string{SYSTEM_ADMIN_ROLE_ID, SYSTEM_USER_ROLE_ID, ""} + roles := []string{SystemAdminRoleId, SystemUserRoleId, ""} if stringNotInSlice(ad.RoleID, roles) { return errors.New("Wrong role in the autocomplete data") } @@ -291,24 +290,6 @@ func (ad *AutocompleteData) IsValid() error { return nil } -// ToJSON encodes AutocompleteData struct to the json -func (ad *AutocompleteData) ToJSON() ([]byte, error) { - b, err := json.Marshal(ad) - if err != nil { - return nil, errors.Wrapf(err, "can't marshal slash command %s", ad.Trigger) - } - return b, nil -} - -// AutocompleteDataFromJSON decodes AutocompleteData struct from the json -func AutocompleteDataFromJSON(data []byte) (*AutocompleteData, error) { - var ad AutocompleteData - if err := json.Unmarshal(data, &ad); err != nil { - return nil, errors.Wrap(err, "can't unmarshal AutocompleteData") - } - return &ad, nil -} - // Equals method checks if argument is the same. func (a *AutocompleteArg) Equals(arg *AutocompleteArg) bool { if a.Name != arg.Name || @@ -419,32 +400,6 @@ func (a *AutocompleteArg) UnmarshalJSON(b []byte) error { return nil } -// AutocompleteSuggestionsToJSON returns json for a list of AutocompleteSuggestion objects -func AutocompleteSuggestionsToJSON(suggestions []AutocompleteSuggestion) []byte { - b, _ := json.Marshal(suggestions) - return b -} - -// AutocompleteSuggestionsFromJSON returns list of AutocompleteSuggestions from json. 
-func AutocompleteSuggestionsFromJSON(data io.Reader) []AutocompleteSuggestion { - var o []AutocompleteSuggestion - json.NewDecoder(data).Decode(&o) - return o -} - -// AutocompleteStaticListItemsToJSON returns json for a list of AutocompleteStaticListItem objects -func AutocompleteStaticListItemsToJSON(items []AutocompleteListItem) []byte { - b, _ := json.Marshal(items) - return b -} - -// AutocompleteStaticListItemsFromJSON returns list of AutocompleteStaticListItem from json. -func AutocompleteStaticListItemsFromJSON(data io.Reader) []AutocompleteListItem { - var o []AutocompleteListItem - json.NewDecoder(data).Decode(&o) - return o -} - func stringNotInSlice(a string, slice []string) bool { for _, b := range slice { if b == a { diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/command_request.go b/vendor/github.com/mattermost/mattermost-server/v6/model/command_request.go new file mode 100644 index 00000000..331394cb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command_request.go @@ -0,0 +1,8 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type CommandMoveRequest struct { + TeamId string `json:"team_id"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go b/vendor/github.com/mattermost/mattermost-server/v6/model/command_response.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/command_response.go index 26b6cceb..b2521f8e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command_response.go @@ -9,12 +9,12 @@ import ( "io/ioutil" "strings" - "github.com/mattermost/mattermost-server/v5/utils/jsonutils" + "github.com/mattermost/mattermost-server/v6/utils/jsonutils" ) const ( - COMMAND_RESPONSE_TYPE_IN_CHANNEL = "in_channel" - COMMAND_RESPONSE_TYPE_EPHEMERAL = "ephemeral" + CommandResponseTypeInChannel = "in_channel" + CommandResponseTypeEphemeral = "ephemeral" ) type CommandResponse struct { @@ -32,14 +32,9 @@ type CommandResponse struct { ExtraResponses []*CommandResponse `json:"extra_responses"` } -func (o *CommandResponse) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - func CommandResponseFromHTTPBody(contentType string, body io.Reader) (*CommandResponse, error) { if strings.TrimSpace(strings.Split(contentType, ";")[0]) == "application/json" { - return CommandResponseFromJson(body) + return CommandResponseFromJSON(body) } if b, err := ioutil.ReadAll(body); err == nil { return CommandResponseFromPlainText(string(b)), nil @@ -53,7 +48,7 @@ func CommandResponseFromPlainText(text string) *CommandResponse { } } -func CommandResponseFromJson(data io.Reader) (*CommandResponse, error) { +func CommandResponseFromJSON(data io.Reader) (*CommandResponse, error) { b, err := ioutil.ReadAll(data) if err != nil { return nil, err @@ -62,7 +57,7 @@ func CommandResponseFromJson(data io.Reader) (*CommandResponse, error) { var o CommandResponse err = json.Unmarshal(b, &o) if err != nil { - return nil, jsonutils.HumanizeJsonError(err, b) + return nil, jsonutils.HumanizeJSONError(err, b) } o.Attachments = StringifySlackFieldValue(o.Attachments) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go 
b/vendor/github.com/mattermost/mattermost-server/v6/model/command_webhook.go similarity index 82% rename from vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/command_webhook.go index 42a16cc7..8093c1b7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/command_webhook.go @@ -14,12 +14,11 @@ type CommandWebhook struct { UserId string ChannelId string RootId string - ParentId string UseCount int } const ( - COMMAND_WEBHOOK_LIFETIME = 1000 * 60 * 30 + CommandWebhookLifetime = 1000 * 60 * 30 ) func (o *CommandWebhook) PreSave() { @@ -53,13 +52,9 @@ func (o *CommandWebhook) IsValid() *AppError { return NewAppError("CommandWebhook.IsValid", "model.command_hook.channel_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.RootId) != 0 && !IsValidId(o.RootId) { + if o.RootId != "" && !IsValidId(o.RootId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.root_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ParentId) != 0 && !IsValidId(o.ParentId) { - return NewAppError("CommandWebhook.IsValid", "model.command_hook.parent_id.app_error", nil, "", http.StatusBadRequest) - } - return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go b/vendor/github.com/mattermost/mattermost-server/v6/model/compliance.go similarity index 66% rename from vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/compliance.go index a86087c1..b46a0d10 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/compliance.go @@ -4,21 +4,19 @@ package model import ( - "encoding/json" - "io" "net/http" "strings" ) const ( - COMPLIANCE_STATUS_CREATED = "created" - COMPLIANCE_STATUS_RUNNING = "running" - COMPLIANCE_STATUS_FINISHED = "finished" - COMPLIANCE_STATUS_FAILED = "failed" - COMPLIANCE_STATUS_REMOVED = "removed" - - COMPLIANCE_TYPE_DAILY = "daily" - COMPLIANCE_TYPE_ADHOC = "adhoc" + ComplianceStatusCreated = "created" + ComplianceStatusRunning = "running" + ComplianceStatusFinished = "finished" + ComplianceStatusFailed = "failed" + ComplianceStatusRemoved = "removed" + + ComplianceTypeDaily = "daily" + ComplianceTypeAdhoc = "adhoc" ) type Compliance struct { @@ -37,9 +35,17 @@ type Compliance struct { type Compliances []Compliance -func (c *Compliance) ToJson() string { - b, _ := json.Marshal(c) - return string(b) +// ComplianceExportCursor is used for paginated iteration of posts +// for compliance export. +// We need to keep track of the last post ID in addition to the last post +// CreateAt to break ties when two posts have the same CreateAt. 
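
The comment just above explains why the export cursor tracks a post ID alongside CreateAt; the struct that follows carries that resume point. Ordering by (CreateAt, Id) and resuming strictly after the saved pair gives keyset pagination that neither skips nor duplicates posts sharing a CreateAt. An illustrative sketch of the predicate such a cursor implies (the SQL string is invented for illustration, not the server's actual query):

```go
package main

import "fmt"

// cursorSketch mirrors two fields of the ComplianceExportCursor struct
// defined just below; it is redeclared here purely for illustration.
type cursorSketch struct {
	LastPostCreateAt int64
	LastPostID       string
}

func main() {
	cur := cursorSketch{LastPostCreateAt: 1650000000000, LastPostID: "abcdefghijklmnopqrstuvwxyz"}
	// Resuming strictly after (CreateAt, Id) means a second post with the
	// same CreateAt as the last exported post is still picked up.
	q := fmt.Sprintf(
		"SELECT Id FROM Posts WHERE CreateAt > %d OR (CreateAt = %d AND Id > '%s') ORDER BY CreateAt, Id LIMIT 100",
		cur.LastPostCreateAt, cur.LastPostCreateAt, cur.LastPostID)
	fmt.Println(q)
}
```
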
+type ComplianceExportCursor struct { + LastChannelsQueryPostCreateAt int64 + LastChannelsQueryPostID string + ChannelsQueryCompleted bool + LastDirectMessagesQueryPostCreateAt int64 + LastDirectMessagesQueryPostID string + DirectMessagesQueryCompleted bool } func (c *Compliance) PreSave() { @@ -48,7 +54,7 @@ } if c.Status == "" { - c.Status = COMPLIANCE_STATUS_CREATED + c.Status = ComplianceStatusCreated } c.Count = 0 @@ -58,9 +64,14 @@ c.CreateAt = GetMillis() } +func (c *Compliance) DeepCopy() *Compliance { + copy := *c + return &copy +} + func (c *Compliance) JobName() string { jobName := c.Type - if c.Type == COMPLIANCE_TYPE_DAILY { + if c.Type == ComplianceTypeDaily { jobName += "-" + c.Desc } @@ -70,7 +81,6 @@ } func (c *Compliance) IsValid() *AppError { - if !IsValidId(c.Id) { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -79,7 +89,7 @@ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } - if len(c.Desc) > 512 || len(c.Desc) == 0 { + if len(c.Desc) > 512 || c.Desc == "" { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "", http.StatusBadRequest) } @@ -97,23 +107,3 @@ return nil } - -func ComplianceFromJson(data io.Reader) *Compliance { - var c *Compliance - json.NewDecoder(data).Decode(&c) - return c -} - -func (c Compliances) ToJson() string { - if b, err := json.Marshal(c); err != nil { - return "[]" - } else { - return string(b) - } -} - -func CompliancesFromJson(data io.Reader) Compliances { - var o Compliances - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go b/vendor/github.com/mattermost/mattermost-server/v6/model/compliance_post.go similarity index 61% rename from vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/compliance_post.go index fcf65075..5c859ffe 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/compliance_post.go @@ -30,7 +30,6 @@ type CompliancePost struct { PostUpdateAt int64 PostDeleteAt int64 PostRootId string - PostParentId string PostOriginalId string PostMessage string PostType string @@ -53,74 +52,70 @@ func CompliancePostHeader() []string { "UserUsername", "UserEmail", "UserNickname", + "UserType", "PostId", "PostCreateAt", "PostUpdateAt", "PostDeleteAt", "PostRootId", - "PostParentId", "PostOriginalId", "PostMessage", "PostType", "PostProps", "PostHashtags", "PostFileIds", - "UserType", } } func cleanComplianceStrings(in string) string { if matched, _ := regexp.MatchString("^\\s*(=|\\+|\\-)", in); matched { return "'" + in - - } else { - return in } + return in } -func (me *CompliancePost) Row() []string { +func (cp *CompliancePost) Row() []string { postDeleteAt := "" - if me.PostDeleteAt > 0 { - postDeleteAt = time.Unix(0, me.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) + if cp.PostDeleteAt > 0 { + postDeleteAt = time.Unix(0, cp.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) } postUpdateAt := "" - if me.PostUpdateAt != me.PostCreateAt { - postUpdateAt = time.Unix(0,
me.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) + if cp.PostUpdateAt != cp.PostCreateAt { + postUpdateAt = time.Unix(0, cp.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) } userType := "user" - if me.IsBot { + if cp.IsBot { userType = "bot" } return []string{ - cleanComplianceStrings(me.TeamName), - cleanComplianceStrings(me.TeamDisplayName), + cleanComplianceStrings(cp.TeamName), + cleanComplianceStrings(cp.TeamDisplayName), - cleanComplianceStrings(me.ChannelName), - cleanComplianceStrings(me.ChannelDisplayName), - cleanComplianceStrings(me.ChannelType), + cleanComplianceStrings(cp.ChannelName), + cleanComplianceStrings(cp.ChannelDisplayName), + cleanComplianceStrings(cp.ChannelType), - cleanComplianceStrings(me.UserUsername), - cleanComplianceStrings(me.UserEmail), - cleanComplianceStrings(me.UserNickname), + cleanComplianceStrings(cp.UserUsername), + cleanComplianceStrings(cp.UserEmail), + cleanComplianceStrings(cp.UserNickname), userType, - me.PostId, - time.Unix(0, me.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), + cp.PostId, + time.Unix(0, cp.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), postUpdateAt, postDeleteAt, - me.PostRootId, - me.PostParentId, - me.PostOriginalId, - cleanComplianceStrings(me.PostMessage), - me.PostType, - me.PostProps, - me.PostHashtags, - me.PostFileIds, + cp.PostRootId, + cp.PostOriginalId, + cleanComplianceStrings(cp.PostMessage), + cp.PostType, + cp.PostProps, + cp.PostHashtags, + cp.PostFileIds, } } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/config.go b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go new file mode 100644 index 00000000..e29c8efa --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go @@ -0,0 +1,3975 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
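
One detail worth pausing on from compliance_post.go above: cleanComplianceStrings defends the CSV compliance export against spreadsheet formula injection by prefixing a single quote to any cell that begins with =, + or -. A standalone copy of the guard for illustration (the real function is unexported in the model package):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same logic as the vendored cleanComplianceStrings: cells starting with
// =, + or - would otherwise execute as formulas when the CSV is opened
// in a spreadsheet, so they are neutralized with a leading quote.
func cleanComplianceStrings(in string) string {
	if matched, _ := regexp.MatchString(`^\s*(=|\+|\-)`, in); matched {
		return "'" + in
	}
	return in
}

func main() {
	fmt.Println(cleanComplianceStrings(`=HYPERLINK("http://evil.example")`)) // quoted
	fmt.Println(cleanComplianceStrings("regular message"))                   // unchanged
}
```
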
+ +package model + +import ( + "crypto/tls" + "encoding/json" + "io" + "math" + "net" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/mattermost/ldap" + + "github.com/mattermost/mattermost-server/v6/shared/filestore" + "github.com/mattermost/mattermost-server/v6/shared/mlog" +) + +const ( + ConnSecurityNone = "" + ConnSecurityPlain = "PLAIN" + ConnSecurityTLS = "TLS" + ConnSecurityStarttls = "STARTTLS" + + ImageDriverLocal = "local" + ImageDriverS3 = "amazons3" + + DatabaseDriverMysql = "mysql" + DatabaseDriverPostgres = "postgres" + + SearchengineElasticsearch = "elasticsearch" + + MinioAccessKey = "minioaccesskey" + MinioSecretKey = "miniosecretkey" + MinioBucket = "mattermost-test" + + PasswordMaximumLength = 64 + PasswordMinimumLength = 5 + + ServiceGitlab = "gitlab" + ServiceGoogle = "google" + ServiceOffice365 = "office365" + ServiceOpenid = "openid" + + GenericNoChannelNotification = "generic_no_channel" + GenericNotification = "generic" + GenericNotificationServer = "https://push-test.mattermost.com" + MmSupportAdvisorAddress = "support-advisor@mattermost.com" + FullNotification = "full" + IdLoadedNotification = "id_loaded" + + DirectMessageAny = "any" + DirectMessageTeam = "team" + + ShowUsername = "username" + ShowNicknameFullName = "nickname_full_name" + ShowFullName = "full_name" + + PermissionsAll = "all" + PermissionsChannelAdmin = "channel_admin" + PermissionsTeamAdmin = "team_admin" + PermissionsSystemAdmin = "system_admin" + + FakeSetting = "********************************" + + RestrictEmojiCreationAll = "all" + RestrictEmojiCreationAdmin = "admin" + RestrictEmojiCreationSystemAdmin = "system_admin" + + PermissionsDeletePostAll = "all" + PermissionsDeletePostTeamAdmin = "team_admin" + PermissionsDeletePostSystemAdmin = "system_admin" + + GroupUnreadChannelsDisabled = "disabled" + GroupUnreadChannelsDefaultOn = "default_on" + GroupUnreadChannelsDefaultOff = "default_off" + + CollapsedThreadsDisabled = "disabled" + CollapsedThreadsDefaultOn = "default_on" + CollapsedThreadsDefaultOff = "default_off" + + EmailBatchingBufferSize = 256 + EmailBatchingInterval = 30 + + EmailNotificationContentsFull = "full" + EmailNotificationContentsGeneric = "generic" + + EmailSMTPDefaultServer = "localhost" + EmailSMTPDefaultPort = "10025" + + SitenameMaxLength = 30 + + ServiceSettingsDefaultSiteURL = "http://localhost:8065" + ServiceSettingsDefaultTLSCertFile = "" + ServiceSettingsDefaultTLSKeyFile = "" + ServiceSettingsDefaultReadTimeout = 300 + ServiceSettingsDefaultWriteTimeout = 300 + ServiceSettingsDefaultIdleTimeout = 60 + ServiceSettingsDefaultMaxLoginAttempts = 10 + ServiceSettingsDefaultAllowCorsFrom = "" + ServiceSettingsDefaultListenAndAddress = ":8065" + ServiceSettingsDefaultGfycatAPIKey = "2_KtH_W5" + ServiceSettingsDefaultGfycatAPISecret = "3wLVZPiswc3DnaiaFoLkDvB4X0IV6CpMkj4tf2inJRsBY6-FnkT08zGmppWFgeof" + ServiceSettingsDefaultDeveloperFlags = "" + + TeamSettingsDefaultSiteName = "Mattermost" + TeamSettingsDefaultMaxUsersPerTeam = 50 + TeamSettingsDefaultCustomBrandText = "" + TeamSettingsDefaultCustomDescriptionText = "" + TeamSettingsDefaultUserStatusAwayTimeout = 300 + + SqlSettingsDefaultDataSource = "postgres://mmuser:mostest@localhost/mattermost_test?sslmode=disable&connect_timeout=10&binary_parameters=yes" + + FileSettingsDefaultDirectory = "./data/" + + ImportSettingsDefaultDirectory = "./import" + ImportSettingsDefaultRetentionDays = 30 + + ExportSettingsDefaultDirectory = "./export" + 
ExportSettingsDefaultRetentionDays = 30 + + EmailSettingsDefaultFeedbackOrganization = "" + + SupportSettingsDefaultTermsOfServiceLink = "https://mattermost.com/terms-of-use/" + SupportSettingsDefaultPrivacyPolicyLink = "https://mattermost.com/privacy-policy/" + SupportSettingsDefaultAboutLink = "https://docs.mattermost.com/about/product.html/" + SupportSettingsDefaultHelpLink = "https://mattermost.com/default-help/" + SupportSettingsDefaultReportAProblemLink = "https://mattermost.com/default-report-a-problem/" + SupportSettingsDefaultSupportEmail = "" + SupportSettingsDefaultReAcceptancePeriod = 365 + + LdapSettingsDefaultFirstNameAttribute = "" + LdapSettingsDefaultLastNameAttribute = "" + LdapSettingsDefaultEmailAttribute = "" + LdapSettingsDefaultUsernameAttribute = "" + LdapSettingsDefaultNicknameAttribute = "" + LdapSettingsDefaultIdAttribute = "" + LdapSettingsDefaultPositionAttribute = "" + LdapSettingsDefaultLoginFieldName = "" + LdapSettingsDefaultGroupDisplayNameAttribute = "" + LdapSettingsDefaultGroupIdAttribute = "" + LdapSettingsDefaultPictureAttribute = "" + + SamlSettingsDefaultIdAttribute = "" + SamlSettingsDefaultGuestAttribute = "" + SamlSettingsDefaultAdminAttribute = "" + SamlSettingsDefaultFirstNameAttribute = "" + SamlSettingsDefaultLastNameAttribute = "" + SamlSettingsDefaultEmailAttribute = "" + SamlSettingsDefaultUsernameAttribute = "" + SamlSettingsDefaultNicknameAttribute = "" + SamlSettingsDefaultLocaleAttribute = "" + SamlSettingsDefaultPositionAttribute = "" + + SamlSettingsSignatureAlgorithmSha1 = "RSAwithSHA1" + SamlSettingsSignatureAlgorithmSha256 = "RSAwithSHA256" + SamlSettingsSignatureAlgorithmSha512 = "RSAwithSHA512" + SamlSettingsDefaultSignatureAlgorithm = SamlSettingsSignatureAlgorithmSha1 + + SamlSettingsCanonicalAlgorithmC14n = "Canonical1.0" + SamlSettingsCanonicalAlgorithmC14n11 = "Canonical1.1" + SamlSettingsDefaultCanonicalAlgorithm = SamlSettingsCanonicalAlgorithmC14n + + NativeappSettingsDefaultAppDownloadLink = "https://mattermost.com/download/#mattermostApps" + NativeappSettingsDefaultAndroidAppDownloadLink = "https://mattermost.com/mattermost-android-app/" + NativeappSettingsDefaultIosAppDownloadLink = "https://mattermost.com/mattermost-ios-app/" + + ExperimentalSettingsDefaultLinkMetadataTimeoutMilliseconds = 5000 + + AnalyticsSettingsDefaultMaxUsersForStatistics = 2500 + + AnnouncementSettingsDefaultBannerColor = "#f2a93b" + AnnouncementSettingsDefaultBannerTextColor = "#333333" + AnnouncementSettingsDefaultNoticesJsonURL = "https://notices.mattermost.com/" + AnnouncementSettingsDefaultNoticesFetchFrequencySeconds = 3600 + + TeamSettingsDefaultTeamText = "default" + + ElasticsearchSettingsDefaultConnectionURL = "http://localhost:9200" + ElasticsearchSettingsDefaultUsername = "elastic" + ElasticsearchSettingsDefaultPassword = "changeme" + ElasticsearchSettingsDefaultPostIndexReplicas = 1 + ElasticsearchSettingsDefaultPostIndexShards = 1 + ElasticsearchSettingsDefaultChannelIndexReplicas = 1 + ElasticsearchSettingsDefaultChannelIndexShards = 1 + ElasticsearchSettingsDefaultUserIndexReplicas = 1 + ElasticsearchSettingsDefaultUserIndexShards = 1 + ElasticsearchSettingsDefaultAggregatePostsAfterDays = 365 + ElasticsearchSettingsDefaultPostsAggregatorJobStartTime = "03:00" + ElasticsearchSettingsDefaultIndexPrefix = "" + ElasticsearchSettingsDefaultLiveIndexingBatchSize = 1 + ElasticsearchSettingsDefaultRequestTimeoutSeconds = 30 + ElasticsearchSettingsDefaultBatchSize = 10000 + + BleveSettingsDefaultIndexDir = "" + 
BleveSettingsDefaultBatchSize = 10000 + + DataRetentionSettingsDefaultMessageRetentionDays = 365 + DataRetentionSettingsDefaultFileRetentionDays = 365 + DataRetentionSettingsDefaultBoardsRetentionDays = 365 + DataRetentionSettingsDefaultDeletionJobStartTime = "02:00" + DataRetentionSettingsDefaultBatchSize = 3000 + + PluginSettingsDefaultDirectory = "./plugins" + PluginSettingsDefaultClientDirectory = "./client/plugins" + PluginSettingsDefaultEnableMarketplace = true + PluginSettingsDefaultMarketplaceURL = "https://api.integrations.mattermost.com" + PluginSettingsOldMarketplaceURL = "https://marketplace.integrations.mattermost.com" + + ComplianceExportTypeCsv = "csv" + ComplianceExportTypeActiance = "actiance" + ComplianceExportTypeGlobalrelay = "globalrelay" + ComplianceExportTypeGlobalrelayZip = "globalrelay-zip" + GlobalrelayCustomerTypeA9 = "A9" + GlobalrelayCustomerTypeA10 = "A10" + + ClientSideCertCheckPrimaryAuth = "primary" + ClientSideCertCheckSecondaryAuth = "secondary" + + ImageProxyTypeLocal = "local" + ImageProxyTypeAtmosCamo = "atmos/camo" + + GoogleSettingsDefaultScope = "profile email" + GoogleSettingsDefaultAuthEndpoint = "https://accounts.google.com/o/oauth2/v2/auth" + GoogleSettingsDefaultTokenEndpoint = "https://www.googleapis.com/oauth2/v4/token" + GoogleSettingsDefaultUserAPIEndpoint = "https://people.googleapis.com/v1/people/me?personFields=names,emailAddresses,nicknames,metadata" + + Office365SettingsDefaultScope = "User.Read" + Office365SettingsDefaultAuthEndpoint = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize" + Office365SettingsDefaultTokenEndpoint = "https://login.microsoftonline.com/common/oauth2/v2.0/token" + Office365SettingsDefaultUserAPIEndpoint = "https://graph.microsoft.com/v1.0/me" + + CloudSettingsDefaultCwsURL = "https://customers.mattermost.com" + CloudSettingsDefaultCwsAPIURL = "https://portal.internal.prod.cloud.mattermost.com" + OpenidSettingsDefaultScope = "profile openid email" + + LocalModeSocketPath = "/var/tmp/mattermost_local.socket" +) + +func GetDefaultAppCustomURLSchemes() []string { + return []string{"mmauth://", "mmauthbeta://"} +} + +var ServerTLSSupportedCiphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +type ServiceSettings struct { + SiteURL *string `access:"environment_web_server,authentication_saml,write_restrictable"` + WebsocketURL *string `access:"write_restrictable,cloud_restrictable"` + LicenseFileLocation *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none + ListenAddress *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none + ConnectionSecurity *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + TLSCertFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + TLSKeyFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + TLSMinVer *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none + TLSStrictTransport *bool `access:"write_restrictable,cloud_restrictable"` + // In seconds. + TLSStrictTransportMaxAge *int64 `access:"write_restrictable,cloud_restrictable"` // telemetry: none + TLSOverwriteCiphers []string `access:"write_restrictable,cloud_restrictable"` // telemetry: none + UseLetsEncrypt *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"` + LetsEncryptCertificateCacheFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none + Forward80To443 *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"` + TrustedProxyIPHeader []string `access:"write_restrictable,cloud_restrictable"` // telemetry: none + ReadTimeout *int `access:"environment_web_server,write_restrictable,cloud_restrictable"` + WriteTimeout *int `access:"environment_web_server,write_restrictable,cloud_restrictable"` + IdleTimeout *int `access:"write_restrictable,cloud_restrictable"` + MaximumLoginAttempts *int `access:"authentication_password,write_restrictable,cloud_restrictable"` + GoroutineHealthThreshold *int `access:"write_restrictable,cloud_restrictable"` // telemetry: none + EnableOAuthServiceProvider *bool `access:"integrations_integration_management"` + EnableIncomingWebhooks *bool `access:"integrations_integration_management"` + EnableOutgoingWebhooks *bool `access:"integrations_integration_management"` + EnableCommands *bool `access:"integrations_integration_management"` + EnablePostUsernameOverride *bool `access:"integrations_integration_management"` + EnablePostIconOverride *bool `access:"integrations_integration_management"` + GoogleDeveloperKey *string `access:"site_posts,write_restrictable,cloud_restrictable"` + EnableLinkPreviews *bool `access:"site_posts"` + EnablePermalinkPreviews *bool `access:"site_posts"` + RestrictLinkPreviews *string `access:"site_posts"` + EnableTesting *bool `access:"environment_developer,write_restrictable,cloud_restrictable"` + EnableDeveloper *bool `access:"environment_developer,write_restrictable,cloud_restrictable"` + DeveloperFlags *string `access:"environment_developer"` + EnableClientPerformanceDebugging *bool `access:"environment_developer,write_restrictable,cloud_restrictable"` + EnableOpenTracing *bool `access:"write_restrictable,cloud_restrictable"` + EnableSecurityFixAlert *bool 
`access:"environment_smtp,write_restrictable,cloud_restrictable"` + EnableInsecureOutgoingConnections *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"` + AllowedUntrustedInternalConnections *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + EnableMultifactorAuthentication *bool `access:"authentication_mfa"` + EnforceMultifactorAuthentication *bool `access:"authentication_mfa"` + EnableUserAccessTokens *bool `access:"integrations_integration_management"` + AllowCorsFrom *string `access:"integrations_cors,write_restrictable,cloud_restrictable"` + CorsExposedHeaders *string `access:"integrations_cors,write_restrictable,cloud_restrictable"` + CorsAllowCredentials *bool `access:"integrations_cors,write_restrictable,cloud_restrictable"` + CorsDebug *bool `access:"integrations_cors,write_restrictable,cloud_restrictable"` + AllowCookiesForSubdomains *bool `access:"write_restrictable,cloud_restrictable"` + ExtendSessionLengthWithActivity *bool `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + SessionLengthWebInDays *int `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + SessionLengthMobileInDays *int `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + SessionLengthSSOInDays *int `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + SessionCacheInMinutes *int `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + SessionIdleTimeoutInMinutes *int `access:"environment_session_lengths,write_restrictable,cloud_restrictable"` + WebsocketSecurePort *int `access:"write_restrictable,cloud_restrictable"` // telemetry: none + WebsocketPort *int `access:"write_restrictable,cloud_restrictable"` // telemetry: none + WebserverMode *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + EnableGifPicker *bool `access:"integrations_gif"` + GfycatAPIKey *string `access:"integrations_gif"` + GfycatAPISecret *string `access:"integrations_gif"` + EnableCustomEmoji *bool `access:"site_emoji"` + EnableEmojiPicker *bool `access:"site_emoji"` + PostEditTimeLimit *int `access:"user_management_permissions"` + TimeBetweenUserTypingUpdatesMilliseconds *int64 `access:"experimental_features,write_restrictable,cloud_restrictable"` + EnablePostSearch *bool `access:"write_restrictable,cloud_restrictable"` + EnableFileSearch *bool `access:"write_restrictable"` + MinimumHashtagLength *int `access:"environment_database,write_restrictable,cloud_restrictable"` + EnableUserTypingMessages *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + EnableChannelViewedMessages *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + EnableUserStatuses *bool `access:"write_restrictable,cloud_restrictable"` + ExperimentalEnableAuthenticationTransfer *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + ClusterLogTimeoutMilliseconds *int `access:"write_restrictable,cloud_restrictable"` + EnablePreviewFeatures *bool `access:"experimental_features"` + EnableTutorial *bool `access:"experimental_features"` + EnableOnboardingFlow *bool `access:"experimental_features"` + ExperimentalEnableDefaultChannelLeaveJoinMessages *bool `access:"experimental_features"` + ExperimentalGroupUnreadChannels *string `access:"experimental_features"` + EnableAPITeamDeletion *bool + EnableAPIUserDeletion *bool + ExperimentalEnableHardenedMode *bool `access:"experimental_features"` + 
ExperimentalStrictCSRFEnforcement *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + EnableEmailInvitations *bool `access:"authentication_signup"` + DisableBotsWhenOwnerIsDeactivated *bool `access:"integrations_bot_accounts,write_restrictable,cloud_restrictable"` + EnableBotAccountCreation *bool `access:"integrations_bot_accounts"` + EnableSVGs *bool `access:"site_posts"` + EnableLatex *bool `access:"site_posts"` + EnableInlineLatex *bool `access:"site_posts"` + EnableAPIChannelDeletion *bool + EnableLocalMode *bool + LocalModeSocketLocation *string // telemetry: none + EnableAWSMetering *bool // telemetry: none + SplitKey *string `access:"experimental_feature_flags,write_restrictable"` // telemetry: none + FeatureFlagSyncIntervalSeconds *int `access:"experimental_feature_flags,write_restrictable"` // telemetry: none + DebugSplit *bool `access:"experimental_feature_flags,write_restrictable"` // telemetry: none + ThreadAutoFollow *bool `access:"experimental_features"` + CollapsedThreads *string `access:"experimental_features"` + ManagedResourcePaths *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` + EnableCustomGroups *bool `access:"site_users_and_teams"` +} + +func (s *ServiceSettings) SetDefaults(isUpdate bool) { + if s.EnableEmailInvitations == nil { + // If the site URL is also not present then assume this is a clean install + if s.SiteURL == nil { + s.EnableEmailInvitations = NewBool(false) + } else { + s.EnableEmailInvitations = NewBool(true) + } + } + + if s.SiteURL == nil { + if s.EnableDeveloper != nil && *s.EnableDeveloper { + s.SiteURL = NewString(ServiceSettingsDefaultSiteURL) + } else { + s.SiteURL = NewString("") + } + } + + if s.WebsocketURL == nil { + s.WebsocketURL = NewString("") + } + + if s.LicenseFileLocation == nil { + s.LicenseFileLocation = NewString("") + } + + if s.ListenAddress == nil { + s.ListenAddress = NewString(ServiceSettingsDefaultListenAndAddress) + } + + if s.EnableLinkPreviews == nil { + s.EnableLinkPreviews = NewBool(true) + } + + if s.EnablePermalinkPreviews == nil { + s.EnablePermalinkPreviews = NewBool(true) + } + + if s.RestrictLinkPreviews == nil { + s.RestrictLinkPreviews = NewString("") + } + + if s.EnableTesting == nil { + s.EnableTesting = NewBool(false) + } + + if s.EnableDeveloper == nil { + s.EnableDeveloper = NewBool(false) + } + + if s.DeveloperFlags == nil { + s.DeveloperFlags = NewString("") + } + + if s.EnableClientPerformanceDebugging == nil { + s.EnableClientPerformanceDebugging = NewBool(false) + } + + if s.EnableOpenTracing == nil { + s.EnableOpenTracing = NewBool(false) + } + + if s.EnableSecurityFixAlert == nil { + s.EnableSecurityFixAlert = NewBool(true) + } + + if s.EnableInsecureOutgoingConnections == nil { + s.EnableInsecureOutgoingConnections = NewBool(false) + } + + if s.AllowedUntrustedInternalConnections == nil { + s.AllowedUntrustedInternalConnections = NewString("") + } + + if s.EnableMultifactorAuthentication == nil { + s.EnableMultifactorAuthentication = NewBool(false) + } + + if s.EnforceMultifactorAuthentication == nil { + s.EnforceMultifactorAuthentication = NewBool(false) + } + + if s.EnableUserAccessTokens == nil { + s.EnableUserAccessTokens = NewBool(false) + } + + if s.GoroutineHealthThreshold == nil { + s.GoroutineHealthThreshold = NewInt(-1) + } + + if s.GoogleDeveloperKey == nil { + s.GoogleDeveloperKey = NewString("") + } + + if s.EnableOAuthServiceProvider == nil { + s.EnableOAuthServiceProvider = NewBool(false) + } + + if s.EnableIncomingWebhooks 
== nil { + s.EnableIncomingWebhooks = NewBool(true) + } + + if s.EnableOutgoingWebhooks == nil { + s.EnableOutgoingWebhooks = NewBool(true) + } + + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") + } + + if s.TLSKeyFile == nil { + s.TLSKeyFile = NewString(ServiceSettingsDefaultTLSKeyFile) + } + + if s.TLSCertFile == nil { + s.TLSCertFile = NewString(ServiceSettingsDefaultTLSCertFile) + } + + if s.TLSMinVer == nil { + s.TLSMinVer = NewString("1.2") + } + + if s.TLSStrictTransport == nil { + s.TLSStrictTransport = NewBool(false) + } + + if s.TLSStrictTransportMaxAge == nil { + s.TLSStrictTransportMaxAge = NewInt64(63072000) + } + + if s.TLSOverwriteCiphers == nil { + s.TLSOverwriteCiphers = []string{} + } + + if s.UseLetsEncrypt == nil { + s.UseLetsEncrypt = NewBool(false) + } + + if s.LetsEncryptCertificateCacheFile == nil { + s.LetsEncryptCertificateCacheFile = NewString("./config/letsencrypt.cache") + } + + if s.ReadTimeout == nil { + s.ReadTimeout = NewInt(ServiceSettingsDefaultReadTimeout) + } + + if s.WriteTimeout == nil { + s.WriteTimeout = NewInt(ServiceSettingsDefaultWriteTimeout) + } + + if s.IdleTimeout == nil { + s.IdleTimeout = NewInt(ServiceSettingsDefaultIdleTimeout) + } + + if s.MaximumLoginAttempts == nil { + s.MaximumLoginAttempts = NewInt(ServiceSettingsDefaultMaxLoginAttempts) + } + + if s.Forward80To443 == nil { + s.Forward80To443 = NewBool(false) + } + + if s.TrustedProxyIPHeader == nil { + s.TrustedProxyIPHeader = []string{} + } + + if s.TimeBetweenUserTypingUpdatesMilliseconds == nil { + s.TimeBetweenUserTypingUpdatesMilliseconds = NewInt64(5000) + } + + if s.EnablePostSearch == nil { + s.EnablePostSearch = NewBool(true) + } + + if s.EnableFileSearch == nil { + s.EnableFileSearch = NewBool(true) + } + + if s.MinimumHashtagLength == nil { + s.MinimumHashtagLength = NewInt(3) + } + + if s.EnableUserTypingMessages == nil { + s.EnableUserTypingMessages = NewBool(true) + } + + if s.EnableChannelViewedMessages == nil { + s.EnableChannelViewedMessages = NewBool(true) + } + + if s.EnableUserStatuses == nil { + s.EnableUserStatuses = NewBool(true) + } + + if s.ClusterLogTimeoutMilliseconds == nil { + s.ClusterLogTimeoutMilliseconds = NewInt(2000) + } + + if s.EnableTutorial == nil { + s.EnableTutorial = NewBool(true) + } + + if s.EnableOnboardingFlow == nil { + s.EnableOnboardingFlow = NewBool(true) + } + + // Must be manually enabled for existing installations. 
+ if s.ExtendSessionLengthWithActivity == nil { + s.ExtendSessionLengthWithActivity = NewBool(!isUpdate) + } + + if s.SessionLengthWebInDays == nil { + if isUpdate { + s.SessionLengthWebInDays = NewInt(180) + } else { + s.SessionLengthWebInDays = NewInt(30) + } + } + + if s.SessionLengthMobileInDays == nil { + if isUpdate { + s.SessionLengthMobileInDays = NewInt(180) + } else { + s.SessionLengthMobileInDays = NewInt(30) + } + } + + if s.SessionLengthSSOInDays == nil { + s.SessionLengthSSOInDays = NewInt(30) + } + + if s.SessionCacheInMinutes == nil { + s.SessionCacheInMinutes = NewInt(10) + } + + if s.SessionIdleTimeoutInMinutes == nil { + s.SessionIdleTimeoutInMinutes = NewInt(43200) + } + + if s.EnableCommands == nil { + s.EnableCommands = NewBool(true) + } + + if s.EnablePostUsernameOverride == nil { + s.EnablePostUsernameOverride = NewBool(false) + } + + if s.EnablePostIconOverride == nil { + s.EnablePostIconOverride = NewBool(false) + } + + if s.WebsocketPort == nil { + s.WebsocketPort = NewInt(80) + } + + if s.WebsocketSecurePort == nil { + s.WebsocketSecurePort = NewInt(443) + } + + if s.AllowCorsFrom == nil { + s.AllowCorsFrom = NewString(ServiceSettingsDefaultAllowCorsFrom) + } + + if s.CorsExposedHeaders == nil { + s.CorsExposedHeaders = NewString("") + } + + if s.CorsAllowCredentials == nil { + s.CorsAllowCredentials = NewBool(false) + } + + if s.CorsDebug == nil { + s.CorsDebug = NewBool(false) + } + + if s.AllowCookiesForSubdomains == nil { + s.AllowCookiesForSubdomains = NewBool(false) + } + + if s.WebserverMode == nil { + s.WebserverMode = NewString("gzip") + } else if *s.WebserverMode == "regular" { + *s.WebserverMode = "gzip" + } + + if s.EnableCustomEmoji == nil { + s.EnableCustomEmoji = NewBool(true) + } + + if s.EnableEmojiPicker == nil { + s.EnableEmojiPicker = NewBool(true) + } + + if s.EnableGifPicker == nil { + s.EnableGifPicker = NewBool(true) + } + + if s.GfycatAPIKey == nil || *s.GfycatAPIKey == "" { + s.GfycatAPIKey = NewString(ServiceSettingsDefaultGfycatAPIKey) + } + + if s.GfycatAPISecret == nil || *s.GfycatAPISecret == "" { + s.GfycatAPISecret = NewString(ServiceSettingsDefaultGfycatAPISecret) + } + + if s.ExperimentalEnableAuthenticationTransfer == nil { + s.ExperimentalEnableAuthenticationTransfer = NewBool(true) + } + + if s.PostEditTimeLimit == nil { + s.PostEditTimeLimit = NewInt(-1) + } + + if s.EnablePreviewFeatures == nil { + s.EnablePreviewFeatures = NewBool(true) + } + + if s.ExperimentalEnableDefaultChannelLeaveJoinMessages == nil { + s.ExperimentalEnableDefaultChannelLeaveJoinMessages = NewBool(true) + } + + if s.ExperimentalGroupUnreadChannels == nil { + s.ExperimentalGroupUnreadChannels = NewString(GroupUnreadChannelsDisabled) + } else if *s.ExperimentalGroupUnreadChannels == "0" { + s.ExperimentalGroupUnreadChannels = NewString(GroupUnreadChannelsDisabled) + } else if *s.ExperimentalGroupUnreadChannels == "1" { + s.ExperimentalGroupUnreadChannels = NewString(GroupUnreadChannelsDefaultOn) + } + + if s.EnableAPITeamDeletion == nil { + s.EnableAPITeamDeletion = NewBool(false) + } + + if s.EnableAPIUserDeletion == nil { + s.EnableAPIUserDeletion = NewBool(false) + } + + if s.EnableAPIChannelDeletion == nil { + s.EnableAPIChannelDeletion = NewBool(false) + } + + if s.ExperimentalEnableHardenedMode == nil { + s.ExperimentalEnableHardenedMode = NewBool(false) + } + + if s.ExperimentalStrictCSRFEnforcement == nil { + s.ExperimentalStrictCSRFEnforcement = NewBool(false) + } + + if s.DisableBotsWhenOwnerIsDeactivated == nil { + 
s.DisableBotsWhenOwnerIsDeactivated = NewBool(true) + } + + if s.EnableBotAccountCreation == nil { + s.EnableBotAccountCreation = NewBool(false) + } + + if s.EnableSVGs == nil { + if isUpdate { + s.EnableSVGs = NewBool(true) + } else { + s.EnableSVGs = NewBool(false) + } + } + + if s.EnableLatex == nil { + if isUpdate { + s.EnableLatex = NewBool(true) + } else { + s.EnableLatex = NewBool(false) + } + } + + if s.EnableInlineLatex == nil { + s.EnableInlineLatex = NewBool(true) + } + + if s.EnableLocalMode == nil { + s.EnableLocalMode = NewBool(false) + } + + if s.LocalModeSocketLocation == nil { + s.LocalModeSocketLocation = NewString(LocalModeSocketPath) + } + + if s.EnableAWSMetering == nil { + s.EnableAWSMetering = NewBool(false) + } + + if s.SplitKey == nil { + s.SplitKey = NewString("") + } + + if s.FeatureFlagSyncIntervalSeconds == nil { + s.FeatureFlagSyncIntervalSeconds = NewInt(30) + } + + if s.DebugSplit == nil { + s.DebugSplit = NewBool(false) + } + + if s.ThreadAutoFollow == nil { + s.ThreadAutoFollow = NewBool(true) + } + + if s.CollapsedThreads == nil { + s.CollapsedThreads = NewString(CollapsedThreadsDisabled) + } + + if s.ManagedResourcePaths == nil { + s.ManagedResourcePaths = NewString("") + } + + if s.EnableCustomGroups == nil { + s.EnableCustomGroups = NewBool(true) + } +} + +type ClusterSettings struct { + Enable *bool `access:"environment_high_availability,write_restrictable"` + ClusterName *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + OverrideHostname *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + NetworkInterface *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + BindAddress *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + AdvertiseAddress *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + UseIPAddress *bool `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + EnableGossipCompression *bool `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + EnableExperimentalGossipEncryption *bool `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + ReadOnlyConfig *bool `access:"environment_high_availability,write_restrictable,cloud_restrictable"` + GossipPort *int `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + StreamingPort *int `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + MaxIdleConns *int `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + MaxIdleConnsPerHost *int `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none + IdleConnTimeoutMilliseconds *int `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none +} + +func (s *ClusterSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.ClusterName == nil { + s.ClusterName = NewString("") + } + + if s.OverrideHostname == nil { + s.OverrideHostname = NewString("") + } + + if s.NetworkInterface == nil { + s.NetworkInterface = NewString("") + } + + if s.BindAddress == nil { + s.BindAddress = NewString("") + } + + if s.AdvertiseAddress == nil { + s.AdvertiseAddress = NewString("") + } + + if s.UseIPAddress == nil { + s.UseIPAddress = NewBool(true) + } 
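
A note on the idiom every SetDefaults method in this file relies on: settings fields are pointers precisely so that nil can mean "key absent from config.json", which an explicit false, 0 or "" would otherwise be indistinguishable from, and the New* helpers exist because Go cannot take the address of a literal. A self-contained sketch of the same pattern (the type and values here are invented; the vendored model package ships its own NewBool/NewInt/NewString):

```go
package main

import "fmt"

// demoSettings imitates the pointer-field style of the structs above.
type demoSettings struct {
	Enable *bool
	Port   *int
}

func newBool(b bool) *bool { return &b }
func newInt(n int) *int    { return &n }

// SetDefaults only fills fields that were absent (nil), never fields the
// administrator set explicitly, even to a zero value.
func (s *demoSettings) SetDefaults() {
	if s.Enable == nil {
		s.Enable = newBool(true)
	}
	if s.Port == nil {
		s.Port = newInt(8065)
	}
}

func main() {
	s := demoSettings{Enable: newBool(false)} // explicit false in the config
	s.SetDefaults()
	fmt.Println(*s.Enable, *s.Port) // false 8065: the explicit false survives
}
```
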
+ + if s.EnableExperimentalGossipEncryption == nil { + s.EnableExperimentalGossipEncryption = NewBool(false) + } + + if s.EnableGossipCompression == nil { + s.EnableGossipCompression = NewBool(true) + } + + if s.ReadOnlyConfig == nil { + s.ReadOnlyConfig = NewBool(true) + } + + if s.GossipPort == nil { + s.GossipPort = NewInt(8074) + } + + if s.StreamingPort == nil { + s.StreamingPort = NewInt(8075) + } + + if s.MaxIdleConns == nil { + s.MaxIdleConns = NewInt(100) + } + + if s.MaxIdleConnsPerHost == nil { + s.MaxIdleConnsPerHost = NewInt(128) + } + + if s.IdleConnTimeoutMilliseconds == nil { + s.IdleConnTimeoutMilliseconds = NewInt(90000) + } +} + +type MetricsSettings struct { + Enable *bool `access:"environment_performance_monitoring,write_restrictable,cloud_restrictable"` + BlockProfileRate *int `access:"environment_performance_monitoring,write_restrictable,cloud_restrictable"` + ListenAddress *string `access:"environment_performance_monitoring,write_restrictable,cloud_restrictable"` // telemetry: none +} + +func (s *MetricsSettings) SetDefaults() { + if s.ListenAddress == nil { + s.ListenAddress = NewString(":8067") + } + + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.BlockProfileRate == nil { + s.BlockProfileRate = NewInt(0) + } +} + +type ExperimentalSettings struct { + ClientSideCertEnable *bool `access:"experimental_features,cloud_restrictable"` + ClientSideCertCheck *string `access:"experimental_features,cloud_restrictable"` + EnableClickToReply *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + LinkMetadataTimeoutMilliseconds *int64 `access:"experimental_features,write_restrictable,cloud_restrictable"` + RestrictSystemAdmin *bool `access:"experimental_features,write_restrictable"` + UseNewSAMLLibrary *bool `access:"experimental_features,cloud_restrictable"` + CloudBilling *bool `access:"experimental_features,write_restrictable"` + EnableSharedChannels *bool `access:"experimental_features"` + EnableRemoteClusterService *bool `access:"experimental_features"` +} + +func (s *ExperimentalSettings) SetDefaults() { + if s.ClientSideCertEnable == nil { + s.ClientSideCertEnable = NewBool(false) + } + + if s.ClientSideCertCheck == nil { + s.ClientSideCertCheck = NewString(ClientSideCertCheckSecondaryAuth) + } + + if s.EnableClickToReply == nil { + s.EnableClickToReply = NewBool(false) + } + + if s.LinkMetadataTimeoutMilliseconds == nil { + s.LinkMetadataTimeoutMilliseconds = NewInt64(ExperimentalSettingsDefaultLinkMetadataTimeoutMilliseconds) + } + + if s.RestrictSystemAdmin == nil { + s.RestrictSystemAdmin = NewBool(false) + } + + if s.CloudBilling == nil { + s.CloudBilling = NewBool(false) + } + + if s.UseNewSAMLLibrary == nil { + s.UseNewSAMLLibrary = NewBool(false) + } + + if s.EnableSharedChannels == nil { + s.EnableSharedChannels = NewBool(false) + } + + if s.EnableRemoteClusterService == nil { + s.EnableRemoteClusterService = NewBool(false) + } +} + +type AnalyticsSettings struct { + MaxUsersForStatistics *int `access:"write_restrictable,cloud_restrictable"` +} + +func (s *AnalyticsSettings) SetDefaults() { + if s.MaxUsersForStatistics == nil { + s.MaxUsersForStatistics = NewInt(AnalyticsSettingsDefaultMaxUsersForStatistics) + } +} + +type SSOSettings struct { + Enable *bool `access:"authentication_openid"` + Secret *string `access:"authentication_openid"` // telemetry: none + Id *string `access:"authentication_openid"` // telemetry: none + Scope *string `access:"authentication_openid"` // telemetry: none + AuthEndpoint *string 
`access:"authentication_openid"` // telemetry: none + TokenEndpoint *string `access:"authentication_openid"` // telemetry: none + UserAPIEndpoint *string `access:"authentication_openid"` // telemetry: none + DiscoveryEndpoint *string `access:"authentication_openid"` // telemetry: none + ButtonText *string `access:"authentication_openid"` // telemetry: none + ButtonColor *string `access:"authentication_openid"` // telemetry: none +} + +func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userAPIEndpoint, buttonColor string) { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.Secret == nil { + s.Secret = NewString("") + } + + if s.Id == nil { + s.Id = NewString("") + } + + if s.Scope == nil { + s.Scope = NewString(scope) + } + + if s.DiscoveryEndpoint == nil { + s.DiscoveryEndpoint = NewString("") + } + + if s.AuthEndpoint == nil { + s.AuthEndpoint = NewString(authEndpoint) + } + + if s.TokenEndpoint == nil { + s.TokenEndpoint = NewString(tokenEndpoint) + } + + if s.UserAPIEndpoint == nil { + s.UserAPIEndpoint = NewString(userAPIEndpoint) + } + + if s.ButtonText == nil { + s.ButtonText = NewString("") + } + + if s.ButtonColor == nil { + s.ButtonColor = NewString(buttonColor) + } +} + +type Office365Settings struct { + Enable *bool `access:"authentication_openid"` + Secret *string `access:"authentication_openid"` // telemetry: none + Id *string `access:"authentication_openid"` // telemetry: none + Scope *string `access:"authentication_openid"` + AuthEndpoint *string `access:"authentication_openid"` // telemetry: none + TokenEndpoint *string `access:"authentication_openid"` // telemetry: none + UserAPIEndpoint *string `access:"authentication_openid"` // telemetry: none + DiscoveryEndpoint *string `access:"authentication_openid"` // telemetry: none + DirectoryId *string `access:"authentication_openid"` // telemetry: none +} + +func (s *Office365Settings) setDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.Id == nil { + s.Id = NewString("") + } + + if s.Secret == nil { + s.Secret = NewString("") + } + + if s.Scope == nil { + s.Scope = NewString(Office365SettingsDefaultScope) + } + + if s.DiscoveryEndpoint == nil { + s.DiscoveryEndpoint = NewString("") + } + + if s.AuthEndpoint == nil { + s.AuthEndpoint = NewString(Office365SettingsDefaultAuthEndpoint) + } + + if s.TokenEndpoint == nil { + s.TokenEndpoint = NewString(Office365SettingsDefaultTokenEndpoint) + } + + if s.UserAPIEndpoint == nil { + s.UserAPIEndpoint = NewString(Office365SettingsDefaultUserAPIEndpoint) + } + + if s.DirectoryId == nil { + s.DirectoryId = NewString("") + } +} + +func (s *Office365Settings) SSOSettings() *SSOSettings { + ssoSettings := SSOSettings{} + ssoSettings.Enable = s.Enable + ssoSettings.Secret = s.Secret + ssoSettings.Id = s.Id + ssoSettings.Scope = s.Scope + ssoSettings.DiscoveryEndpoint = s.DiscoveryEndpoint + ssoSettings.AuthEndpoint = s.AuthEndpoint + ssoSettings.TokenEndpoint = s.TokenEndpoint + ssoSettings.UserAPIEndpoint = s.UserAPIEndpoint + return &ssoSettings +} + +type ReplicaLagSettings struct { + DataSource *string `access:"environment,write_restrictable,cloud_restrictable"` // telemetry: none + QueryAbsoluteLag *string `access:"environment,write_restrictable,cloud_restrictable"` // telemetry: none + QueryTimeLag *string `access:"environment,write_restrictable,cloud_restrictable"` // telemetry: none +} + +type SqlSettings struct { + DriverName *string `access:"environment_database,write_restrictable,cloud_restrictable"` + DataSource *string 
`access:"environment_database,write_restrictable,cloud_restrictable"` // telemetry: none + DataSourceReplicas []string `access:"environment_database,write_restrictable,cloud_restrictable"` + DataSourceSearchReplicas []string `access:"environment_database,write_restrictable,cloud_restrictable"` + MaxIdleConns *int `access:"environment_database,write_restrictable,cloud_restrictable"` + ConnMaxLifetimeMilliseconds *int `access:"environment_database,write_restrictable,cloud_restrictable"` + ConnMaxIdleTimeMilliseconds *int `access:"environment_database,write_restrictable,cloud_restrictable"` + MaxOpenConns *int `access:"environment_database,write_restrictable,cloud_restrictable"` + Trace *bool `access:"environment_database,write_restrictable,cloud_restrictable"` + AtRestEncryptKey *string `access:"environment_database,write_restrictable,cloud_restrictable"` // telemetry: none + QueryTimeout *int `access:"environment_database,write_restrictable,cloud_restrictable"` + DisableDatabaseSearch *bool `access:"environment_database,write_restrictable,cloud_restrictable"` + MigrationsStatementTimeoutSeconds *int `access:"environment_database,write_restrictable,cloud_restrictable"` + ReplicaLagSettings []*ReplicaLagSettings `access:"environment_database,write_restrictable,cloud_restrictable"` // telemetry: none +} + +func (s *SqlSettings) SetDefaults(isUpdate bool) { + if s.DriverName == nil { + s.DriverName = NewString(DatabaseDriverPostgres) + } + + if s.DataSource == nil { + s.DataSource = NewString(SqlSettingsDefaultDataSource) + } + + if s.DataSourceReplicas == nil { + s.DataSourceReplicas = []string{} + } + + if s.DataSourceSearchReplicas == nil { + s.DataSourceSearchReplicas = []string{} + } + + if isUpdate { + // When updating an existing configuration, ensure an encryption key has been specified. + if s.AtRestEncryptKey == nil || *s.AtRestEncryptKey == "" { + s.AtRestEncryptKey = NewString(NewRandomString(32)) + } + } else { + // When generating a blank configuration, leave this key empty to be generated on server start. 
+ s.AtRestEncryptKey = NewString("") + } + + if s.MaxIdleConns == nil { + s.MaxIdleConns = NewInt(20) + } + + if s.MaxOpenConns == nil { + s.MaxOpenConns = NewInt(300) + } + + if s.ConnMaxLifetimeMilliseconds == nil { + s.ConnMaxLifetimeMilliseconds = NewInt(3600000) + } + + if s.ConnMaxIdleTimeMilliseconds == nil { + s.ConnMaxIdleTimeMilliseconds = NewInt(300000) + } + + if s.Trace == nil { + s.Trace = NewBool(false) + } + + if s.QueryTimeout == nil { + s.QueryTimeout = NewInt(30) + } + + if s.DisableDatabaseSearch == nil { + s.DisableDatabaseSearch = NewBool(false) + } + + if s.MigrationsStatementTimeoutSeconds == nil { + s.MigrationsStatementTimeoutSeconds = NewInt(100000) + } + + if s.ReplicaLagSettings == nil { + s.ReplicaLagSettings = []*ReplicaLagSettings{} + } +} + +type LogSettings struct { + EnableConsole *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` + ConsoleLevel *string `access:"environment_logging,write_restrictable,cloud_restrictable"` + ConsoleJson *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` + EnableColor *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` // telemetry: none + EnableFile *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` + FileLevel *string `access:"environment_logging,write_restrictable,cloud_restrictable"` + FileJson *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` + FileLocation *string `access:"environment_logging,write_restrictable,cloud_restrictable"` + EnableWebhookDebugging *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` + EnableDiagnostics *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` // telemetry: none + EnableSentry *bool `access:"environment_logging,write_restrictable,cloud_restrictable"` // telemetry: none + AdvancedLoggingConfig *string `access:"environment_logging,write_restrictable,cloud_restrictable"` +} + +func NewLogSettings() *LogSettings { + settings := &LogSettings{} + settings.SetDefaults() + return settings +} + +func (s *LogSettings) SetDefaults() { + if s.EnableConsole == nil { + s.EnableConsole = NewBool(true) + } + + if s.ConsoleLevel == nil { + s.ConsoleLevel = NewString("DEBUG") + } + + if s.EnableColor == nil { + s.EnableColor = NewBool(false) + } + + if s.EnableFile == nil { + s.EnableFile = NewBool(true) + } + + if s.FileLevel == nil { + s.FileLevel = NewString("INFO") + } + + if s.FileLocation == nil { + s.FileLocation = NewString("") + } + + if s.EnableWebhookDebugging == nil { + s.EnableWebhookDebugging = NewBool(true) + } + + if s.EnableDiagnostics == nil { + s.EnableDiagnostics = NewBool(true) + } + + if s.EnableSentry == nil { + s.EnableSentry = NewBool(*s.EnableDiagnostics) + } + + if s.ConsoleJson == nil { + s.ConsoleJson = NewBool(true) + } + + if s.FileJson == nil { + s.FileJson = NewBool(true) + } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } +} + +type ExperimentalAuditSettings struct { + FileEnabled *bool `access:"experimental_features,write_restrictable,cloud_restrictable"` + FileName *string `access:"experimental_features,write_restrictable,cloud_restrictable"` // telemetry: none + FileMaxSizeMB *int `access:"experimental_features,write_restrictable,cloud_restrictable"` + FileMaxAgeDays *int `access:"experimental_features,write_restrictable,cloud_restrictable"` + FileMaxBackups *int `access:"experimental_features,write_restrictable,cloud_restrictable"` + FileCompress *bool 
`access:"experimental_features,write_restrictable,cloud_restrictable"` + FileMaxQueueSize *int `access:"experimental_features,write_restrictable,cloud_restrictable"` + AdvancedLoggingConfig *string `access:"experimental_features,write_restrictable,cloud_restrictable"` +} + +func (s *ExperimentalAuditSettings) SetDefaults() { + if s.FileEnabled == nil { + s.FileEnabled = NewBool(false) + } + + if s.FileName == nil { + s.FileName = NewString("") + } + + if s.FileMaxSizeMB == nil { + s.FileMaxSizeMB = NewInt(100) + } + + if s.FileMaxAgeDays == nil { + s.FileMaxAgeDays = NewInt(0) // no limit on age + } + + if s.FileMaxBackups == nil { // no limit on number of backups + s.FileMaxBackups = NewInt(0) + } + + if s.FileCompress == nil { + s.FileCompress = NewBool(false) + } + + if s.FileMaxQueueSize == nil { + s.FileMaxQueueSize = NewInt(1000) + } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } +} + +type NotificationLogSettings struct { + EnableConsole *bool `access:"write_restrictable,cloud_restrictable"` + ConsoleLevel *string `access:"write_restrictable,cloud_restrictable"` + ConsoleJson *bool `access:"write_restrictable,cloud_restrictable"` + EnableColor *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none + EnableFile *bool `access:"write_restrictable,cloud_restrictable"` + FileLevel *string `access:"write_restrictable,cloud_restrictable"` + FileJson *bool `access:"write_restrictable,cloud_restrictable"` + FileLocation *string `access:"write_restrictable,cloud_restrictable"` + AdvancedLoggingConfig *string `access:"write_restrictable,cloud_restrictable"` +} + +func (s *NotificationLogSettings) SetDefaults() { + if s.EnableConsole == nil { + s.EnableConsole = NewBool(true) + } + + if s.ConsoleLevel == nil { + s.ConsoleLevel = NewString("DEBUG") + } + + if s.EnableFile == nil { + s.EnableFile = NewBool(true) + } + + if s.FileLevel == nil { + s.FileLevel = NewString("INFO") + } + + if s.FileLocation == nil { + s.FileLocation = NewString("") + } + + if s.ConsoleJson == nil { + s.ConsoleJson = NewBool(true) + } + + if s.EnableColor == nil { + s.EnableColor = NewBool(false) + } + + if s.FileJson == nil { + s.FileJson = NewBool(true) + } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } +} + +type PasswordSettings struct { + MinimumLength *int `access:"authentication_password"` + Lowercase *bool `access:"authentication_password"` + Number *bool `access:"authentication_password"` + Uppercase *bool `access:"authentication_password"` + Symbol *bool `access:"authentication_password"` +} + +func (s *PasswordSettings) SetDefaults() { + if s.MinimumLength == nil { + s.MinimumLength = NewInt(10) + } + + if s.Lowercase == nil { + s.Lowercase = NewBool(true) + } + + if s.Number == nil { + s.Number = NewBool(true) + } + + if s.Uppercase == nil { + s.Uppercase = NewBool(true) + } + + if s.Symbol == nil { + s.Symbol = NewBool(true) + } +} + +type FileSettings struct { + EnableFileAttachments *bool `access:"site_file_sharing_and_downloads,cloud_restrictable"` + EnableMobileUpload *bool `access:"site_file_sharing_and_downloads,cloud_restrictable"` + EnableMobileDownload *bool `access:"site_file_sharing_and_downloads,cloud_restrictable"` + MaxFileSize *int64 `access:"environment_file_storage,cloud_restrictable"` + MaxImageResolution *int64 `access:"environment_file_storage,cloud_restrictable"` + DriverName *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` + Directory *string 
`access:"environment_file_storage,write_restrictable,cloud_restrictable"` + EnablePublicLink *bool `access:"site_public_links,cloud_restrictable"` + ExtractContent *bool `access:"environment_file_storage,write_restrictable"` + ArchiveRecursion *bool `access:"environment_file_storage,write_restrictable"` + PublicLinkSalt *string `access:"site_public_links,cloud_restrictable"` // telemetry: none + InitialFont *string `access:"environment_file_storage,cloud_restrictable"` // telemetry: none + AmazonS3AccessKeyId *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3SecretAccessKey *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3Bucket *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3PathPrefix *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3Region *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3Endpoint *string `access:"environment_file_storage,write_restrictable,cloud_restrictable"` // telemetry: none + AmazonS3SSL *bool `access:"environment_file_storage,write_restrictable,cloud_restrictable"` + AmazonS3SignV2 *bool `access:"environment_file_storage,write_restrictable,cloud_restrictable"` + AmazonS3SSE *bool `access:"environment_file_storage,write_restrictable,cloud_restrictable"` + AmazonS3Trace *bool `access:"environment_file_storage,write_restrictable,cloud_restrictable"` +} + +func (s *FileSettings) SetDefaults(isUpdate bool) { + if s.EnableFileAttachments == nil { + s.EnableFileAttachments = NewBool(true) + } + + if s.EnableMobileUpload == nil { + s.EnableMobileUpload = NewBool(true) + } + + if s.EnableMobileDownload == nil { + s.EnableMobileDownload = NewBool(true) + } + + if s.MaxFileSize == nil { + s.MaxFileSize = NewInt64(100 * 1024 * 1024) // 100MB (IEC) + } + + if s.MaxImageResolution == nil { + s.MaxImageResolution = NewInt64(7680 * 4320) // 8K, ~33MPX + } + + if s.DriverName == nil { + s.DriverName = NewString(ImageDriverLocal) + } + + if s.Directory == nil || *s.Directory == "" { + s.Directory = NewString(FileSettingsDefaultDirectory) + } + + if s.EnablePublicLink == nil { + s.EnablePublicLink = NewBool(false) + } + + if s.ExtractContent == nil { + s.ExtractContent = NewBool(true) + } + + if s.ArchiveRecursion == nil { + s.ArchiveRecursion = NewBool(false) + } + + if isUpdate { + // When updating an existing configuration, ensure link salt has been specified. + if s.PublicLinkSalt == nil || *s.PublicLinkSalt == "" { + s.PublicLinkSalt = NewString(NewRandomString(32)) + } + } else { + // When generating a blank configuration, leave link salt empty to be generated on server start. 
+ s.PublicLinkSalt = NewString("") + } + + if s.InitialFont == nil { + // Defaults to "nunito-bold.ttf" + s.InitialFont = NewString("nunito-bold.ttf") + } + + if s.AmazonS3AccessKeyId == nil { + s.AmazonS3AccessKeyId = NewString("") + } + + if s.AmazonS3SecretAccessKey == nil { + s.AmazonS3SecretAccessKey = NewString("") + } + + if s.AmazonS3Bucket == nil { + s.AmazonS3Bucket = NewString("") + } + + if s.AmazonS3PathPrefix == nil { + s.AmazonS3PathPrefix = NewString("") + } + + if s.AmazonS3Region == nil { + s.AmazonS3Region = NewString("") + } + + if s.AmazonS3Endpoint == nil || *s.AmazonS3Endpoint == "" { + // Defaults to "s3.amazonaws.com" + s.AmazonS3Endpoint = NewString("s3.amazonaws.com") + } + + if s.AmazonS3SSL == nil { + s.AmazonS3SSL = NewBool(true) // Secure by default. + } + + if s.AmazonS3SignV2 == nil { + s.AmazonS3SignV2 = new(bool) + // Signature v2 is not enabled by default. + } + + if s.AmazonS3SSE == nil { + s.AmazonS3SSE = NewBool(false) // Not Encrypted by default. + } + + if s.AmazonS3Trace == nil { + s.AmazonS3Trace = NewBool(false) + } +} + +func (s *FileSettings) ToFileBackendSettings(enableComplianceFeature bool) filestore.FileBackendSettings { + if *s.DriverName == ImageDriverLocal { + return filestore.FileBackendSettings{ + DriverName: *s.DriverName, + Directory: *s.Directory, + } + } + return filestore.FileBackendSettings{ + DriverName: *s.DriverName, + AmazonS3AccessKeyId: *s.AmazonS3AccessKeyId, + AmazonS3SecretAccessKey: *s.AmazonS3SecretAccessKey, + AmazonS3Bucket: *s.AmazonS3Bucket, + AmazonS3PathPrefix: *s.AmazonS3PathPrefix, + AmazonS3Region: *s.AmazonS3Region, + AmazonS3Endpoint: *s.AmazonS3Endpoint, + AmazonS3SSL: s.AmazonS3SSL == nil || *s.AmazonS3SSL, + AmazonS3SignV2: s.AmazonS3SignV2 != nil && *s.AmazonS3SignV2, + AmazonS3SSE: s.AmazonS3SSE != nil && *s.AmazonS3SSE && enableComplianceFeature, + AmazonS3Trace: s.AmazonS3Trace != nil && *s.AmazonS3Trace, + } +} + +type EmailSettings struct { + EnableSignUpWithEmail *bool `access:"authentication_email"` + EnableSignInWithEmail *bool `access:"authentication_email"` + EnableSignInWithUsername *bool `access:"authentication_email"` + SendEmailNotifications *bool `access:"site_notifications"` + UseChannelInEmailNotifications *bool `access:"experimental_features"` + RequireEmailVerification *bool `access:"authentication_email"` + FeedbackName *string `access:"site_notifications"` + FeedbackEmail *string `access:"site_notifications,cloud_restrictable"` + ReplyToAddress *string `access:"site_notifications,cloud_restrictable"` + FeedbackOrganization *string `access:"site_notifications"` + EnableSMTPAuth *bool `access:"environment_smtp,write_restrictable,cloud_restrictable"` + SMTPUsername *string `access:"environment_smtp,write_restrictable,cloud_restrictable"` // telemetry: none + SMTPPassword *string `access:"environment_smtp,write_restrictable,cloud_restrictable"` // telemetry: none + SMTPServer *string `access:"environment_smtp,write_restrictable,cloud_restrictable"` // telemetry: none + SMTPPort *string `access:"environment_smtp,write_restrictable,cloud_restrictable"` // telemetry: none + SMTPServerTimeout *int `access:"cloud_restrictable"` + ConnectionSecurity *string `access:"environment_smtp,write_restrictable,cloud_restrictable"` + SendPushNotifications *bool `access:"environment_push_notification_server"` + PushNotificationServer *string `access:"environment_push_notification_server"` // telemetry: none + PushNotificationContents *string `access:"site_notifications"` + PushNotificationBuffer *int // 
telemetry: none + EnableEmailBatching *bool `access:"site_notifications"` + EmailBatchingBufferSize *int `access:"experimental_features"` + EmailBatchingInterval *int `access:"experimental_features"` + EnablePreviewModeBanner *bool `access:"site_notifications"` + SkipServerCertificateVerification *bool `access:"environment_smtp,write_restrictable,cloud_restrictable"` + EmailNotificationContentsType *string `access:"site_notifications"` + LoginButtonColor *string `access:"experimental_features"` + LoginButtonBorderColor *string `access:"experimental_features"` + LoginButtonTextColor *string `access:"experimental_features"` + EnableInactivityEmail *bool +} + +func (s *EmailSettings) SetDefaults(isUpdate bool) { + if s.EnableSignUpWithEmail == nil { + s.EnableSignUpWithEmail = NewBool(true) + } + + if s.EnableSignInWithEmail == nil { + s.EnableSignInWithEmail = NewBool(*s.EnableSignUpWithEmail) + } + + if s.EnableSignInWithUsername == nil { + s.EnableSignInWithUsername = NewBool(true) + } + + if s.SendEmailNotifications == nil { + s.SendEmailNotifications = NewBool(true) + } + + if s.UseChannelInEmailNotifications == nil { + s.UseChannelInEmailNotifications = NewBool(false) + } + + if s.RequireEmailVerification == nil { + s.RequireEmailVerification = NewBool(false) + } + + if s.FeedbackName == nil { + s.FeedbackName = NewString("") + } + + if s.FeedbackEmail == nil { + s.FeedbackEmail = NewString("test@example.com") + } + + if s.ReplyToAddress == nil { + s.ReplyToAddress = NewString("test@example.com") + } + + if s.FeedbackOrganization == nil { + s.FeedbackOrganization = NewString(EmailSettingsDefaultFeedbackOrganization) + } + + if s.EnableSMTPAuth == nil { + if s.ConnectionSecurity == nil || *s.ConnectionSecurity == ConnSecurityNone { + s.EnableSMTPAuth = NewBool(false) + } else { + s.EnableSMTPAuth = NewBool(true) + } + } + + if s.SMTPUsername == nil { + s.SMTPUsername = NewString("") + } + + if s.SMTPPassword == nil { + s.SMTPPassword = NewString("") + } + + if s.SMTPServer == nil || *s.SMTPServer == "" { + s.SMTPServer = NewString(EmailSMTPDefaultServer) + } + + if s.SMTPPort == nil || *s.SMTPPort == "" { + s.SMTPPort = NewString(EmailSMTPDefaultPort) + } + + if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { + s.SMTPServerTimeout = NewInt(10) + } + + if s.ConnectionSecurity == nil || *s.ConnectionSecurity == ConnSecurityPlain { + s.ConnectionSecurity = NewString(ConnSecurityNone) + } + + if s.SendPushNotifications == nil { + s.SendPushNotifications = NewBool(!isUpdate) + } + + if s.PushNotificationServer == nil { + if isUpdate { + s.PushNotificationServer = NewString("") + } else { + s.PushNotificationServer = NewString(GenericNotificationServer) + } + } + + if s.PushNotificationContents == nil { + s.PushNotificationContents = NewString(FullNotification) + } + + if s.PushNotificationBuffer == nil { + s.PushNotificationBuffer = NewInt(1000) + } + + if s.EnableEmailBatching == nil { + s.EnableEmailBatching = NewBool(false) + } + + if s.EmailBatchingBufferSize == nil { + s.EmailBatchingBufferSize = NewInt(EmailBatchingBufferSize) + } + + if s.EmailBatchingInterval == nil { + s.EmailBatchingInterval = NewInt(EmailBatchingInterval) + } + + if s.EnablePreviewModeBanner == nil { + s.EnablePreviewModeBanner = NewBool(true) + } + + if s.EnableSMTPAuth == nil { + if *s.ConnectionSecurity == ConnSecurityNone { + s.EnableSMTPAuth = NewBool(false) + } else { + s.EnableSMTPAuth = NewBool(true) + } + } + + if *s.ConnectionSecurity == ConnSecurityPlain { + *s.ConnectionSecurity = 
ConnSecurityNone + } + + if s.SkipServerCertificateVerification == nil { + s.SkipServerCertificateVerification = NewBool(false) + } + + if s.EmailNotificationContentsType == nil { + s.EmailNotificationContentsType = NewString(EmailNotificationContentsFull) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") + } + + if s.EnableInactivityEmail == nil { + s.EnableInactivityEmail = NewBool(true) + } +} + +type RateLimitSettings struct { + Enable *bool `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + PerSec *int `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + MaxBurst *int `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + MemoryStoreSize *int `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + VaryByRemoteAddr *bool `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + VaryByUser *bool `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` + VaryByHeader string `access:"environment_rate_limiting,write_restrictable,cloud_restrictable"` +} + +func (s *RateLimitSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.PerSec == nil { + s.PerSec = NewInt(10) + } + + if s.MaxBurst == nil { + s.MaxBurst = NewInt(100) + } + + if s.MemoryStoreSize == nil { + s.MemoryStoreSize = NewInt(10000) + } + + if s.VaryByRemoteAddr == nil { + s.VaryByRemoteAddr = NewBool(true) + } + + if s.VaryByUser == nil { + s.VaryByUser = NewBool(false) + } +} + +type PrivacySettings struct { + ShowEmailAddress *bool `access:"site_users_and_teams"` + ShowFullName *bool `access:"site_users_and_teams"` +} + +func (s *PrivacySettings) setDefaults() { + if s.ShowEmailAddress == nil { + s.ShowEmailAddress = NewBool(true) + } + + if s.ShowFullName == nil { + s.ShowFullName = NewBool(true) + } +} + +type SupportSettings struct { + TermsOfServiceLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + PrivacyPolicyLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + AboutLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + HelpLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + ReportAProblemLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + SupportEmail *string `access:"site_notifications"` + CustomTermsOfServiceEnabled *bool `access:"compliance_custom_terms_of_service"` + CustomTermsOfServiceReAcceptancePeriod *int `access:"compliance_custom_terms_of_service"` + EnableAskCommunityLink *bool `access:"site_customization"` +} + +func (s *SupportSettings) SetDefaults() { + if !isSafeLink(s.TermsOfServiceLink) { + *s.TermsOfServiceLink = SupportSettingsDefaultTermsOfServiceLink + } + + if s.TermsOfServiceLink == nil { + s.TermsOfServiceLink = NewString(SupportSettingsDefaultTermsOfServiceLink) + } + + if !isSafeLink(s.PrivacyPolicyLink) { + *s.PrivacyPolicyLink = "" + } + + if s.PrivacyPolicyLink == nil { + s.PrivacyPolicyLink = NewString(SupportSettingsDefaultPrivacyPolicyLink) + } + + if !isSafeLink(s.AboutLink) { + *s.AboutLink = "" + } + + if s.AboutLink == nil { + s.AboutLink = NewString(SupportSettingsDefaultAboutLink) + } + + if !isSafeLink(s.HelpLink) { + *s.HelpLink = "" + } + + if 
s.HelpLink == nil { + s.HelpLink = NewString(SupportSettingsDefaultHelpLink) + } + + if !isSafeLink(s.ReportAProblemLink) { + *s.ReportAProblemLink = "" + } + + if s.ReportAProblemLink == nil { + s.ReportAProblemLink = NewString(SupportSettingsDefaultReportAProblemLink) + } + + if s.SupportEmail == nil { + s.SupportEmail = NewString(SupportSettingsDefaultSupportEmail) + } + + if s.CustomTermsOfServiceEnabled == nil { + s.CustomTermsOfServiceEnabled = NewBool(false) + } + + if s.CustomTermsOfServiceReAcceptancePeriod == nil { + s.CustomTermsOfServiceReAcceptancePeriod = NewInt(SupportSettingsDefaultReAcceptancePeriod) + } + + if s.EnableAskCommunityLink == nil { + s.EnableAskCommunityLink = NewBool(true) + } +} + +type AnnouncementSettings struct { + EnableBanner *bool `access:"site_announcement_banner"` + BannerText *string `access:"site_announcement_banner"` // telemetry: none + BannerColor *string `access:"site_announcement_banner"` + BannerTextColor *string `access:"site_announcement_banner"` + AllowBannerDismissal *bool `access:"site_announcement_banner"` + AdminNoticesEnabled *bool `access:"site_notices"` + UserNoticesEnabled *bool `access:"site_notices"` + NoticesURL *string `access:"site_notices,write_restrictable"` // telemetry: none + NoticesFetchFrequency *int `access:"site_notices,write_restrictable"` // telemetry: none + NoticesSkipCache *bool `access:"site_notices,write_restrictable"` // telemetry: none +} + +func (s *AnnouncementSettings) SetDefaults() { + if s.EnableBanner == nil { + s.EnableBanner = NewBool(false) + } + + if s.BannerText == nil { + s.BannerText = NewString("") + } + + if s.BannerColor == nil { + s.BannerColor = NewString(AnnouncementSettingsDefaultBannerColor) + } + + if s.BannerTextColor == nil { + s.BannerTextColor = NewString(AnnouncementSettingsDefaultBannerTextColor) + } + + if s.AllowBannerDismissal == nil { + s.AllowBannerDismissal = NewBool(true) + } + + if s.AdminNoticesEnabled == nil { + s.AdminNoticesEnabled = NewBool(true) + } + + if s.UserNoticesEnabled == nil { + s.UserNoticesEnabled = NewBool(true) + } + if s.NoticesURL == nil { + s.NoticesURL = NewString(AnnouncementSettingsDefaultNoticesJsonURL) + } + if s.NoticesSkipCache == nil { + s.NoticesSkipCache = NewBool(false) + } + if s.NoticesFetchFrequency == nil { + s.NoticesFetchFrequency = NewInt(AnnouncementSettingsDefaultNoticesFetchFrequencySeconds) + } + +} + +type ThemeSettings struct { + EnableThemeSelection *bool `access:"experimental_features"` + DefaultTheme *string `access:"experimental_features"` + AllowCustomThemes *bool `access:"experimental_features"` + AllowedThemes []string +} + +func (s *ThemeSettings) SetDefaults() { + if s.EnableThemeSelection == nil { + s.EnableThemeSelection = NewBool(true) + } + + if s.DefaultTheme == nil { + s.DefaultTheme = NewString(TeamSettingsDefaultTeamText) + } + + if s.AllowCustomThemes == nil { + s.AllowCustomThemes = NewBool(true) + } + + if s.AllowedThemes == nil { + s.AllowedThemes = []string{} + } +} + +type TeamSettings struct { + SiteName *string `access:"site_customization"` + MaxUsersPerTeam *int `access:"site_users_and_teams"` + EnableUserCreation *bool `access:"authentication_signup"` + EnableOpenServer *bool `access:"authentication_signup"` + EnableUserDeactivation *bool `access:"experimental_features"` + RestrictCreationToDomains *string `access:"authentication_signup"` // telemetry: none + EnableCustomUserStatuses *bool `access:"site_users_and_teams"` + EnableCustomBrand *bool `access:"site_customization"` + CustomBrandText *string 
`access:"site_customization"` + CustomDescriptionText *string `access:"site_customization"` + RestrictDirectMessage *string `access:"site_users_and_teams"` + // In seconds. + UserStatusAwayTimeout *int64 `access:"experimental_features"` + MaxChannelsPerTeam *int64 `access:"site_users_and_teams"` + MaxNotificationsPerChannel *int64 `access:"environment_push_notification_server"` + EnableConfirmNotificationsToChannel *bool `access:"site_notifications"` + TeammateNameDisplay *string `access:"site_users_and_teams"` + ExperimentalViewArchivedChannels *bool `access:"experimental_features,site_users_and_teams"` + ExperimentalEnableAutomaticReplies *bool `access:"experimental_features"` + LockTeammateNameDisplay *bool `access:"site_users_and_teams"` + ExperimentalPrimaryTeam *string `access:"experimental_features"` + ExperimentalDefaultChannels []string `access:"experimental_features"` +} + +func (s *TeamSettings) SetDefaults() { + + if s.SiteName == nil || *s.SiteName == "" { + s.SiteName = NewString(TeamSettingsDefaultSiteName) + } + + if s.MaxUsersPerTeam == nil { + s.MaxUsersPerTeam = NewInt(TeamSettingsDefaultMaxUsersPerTeam) + } + + if s.EnableUserCreation == nil { + s.EnableUserCreation = NewBool(true) + } + + if s.EnableOpenServer == nil { + s.EnableOpenServer = NewBool(false) + } + + if s.RestrictCreationToDomains == nil { + s.RestrictCreationToDomains = NewString("") + } + + if s.EnableCustomUserStatuses == nil { + s.EnableCustomUserStatuses = NewBool(true) + } + + if s.EnableCustomBrand == nil { + s.EnableCustomBrand = NewBool(false) + } + + if s.EnableUserDeactivation == nil { + s.EnableUserDeactivation = NewBool(false) + } + + if s.CustomBrandText == nil { + s.CustomBrandText = NewString(TeamSettingsDefaultCustomBrandText) + } + + if s.CustomDescriptionText == nil { + s.CustomDescriptionText = NewString(TeamSettingsDefaultCustomDescriptionText) + } + + if s.RestrictDirectMessage == nil { + s.RestrictDirectMessage = NewString(DirectMessageAny) + } + + if s.UserStatusAwayTimeout == nil { + s.UserStatusAwayTimeout = NewInt64(TeamSettingsDefaultUserStatusAwayTimeout) + } + + if s.MaxChannelsPerTeam == nil { + s.MaxChannelsPerTeam = NewInt64(2000) + } + + if s.MaxNotificationsPerChannel == nil { + s.MaxNotificationsPerChannel = NewInt64(1000) + } + + if s.EnableConfirmNotificationsToChannel == nil { + s.EnableConfirmNotificationsToChannel = NewBool(true) + } + + if s.ExperimentalEnableAutomaticReplies == nil { + s.ExperimentalEnableAutomaticReplies = NewBool(false) + } + + if s.ExperimentalPrimaryTeam == nil { + s.ExperimentalPrimaryTeam = NewString("") + } + + if s.ExperimentalDefaultChannels == nil { + s.ExperimentalDefaultChannels = []string{} + } + + if s.EnableUserCreation == nil { + s.EnableUserCreation = NewBool(true) + } + + if s.ExperimentalViewArchivedChannels == nil { + s.ExperimentalViewArchivedChannels = NewBool(true) + } + + if s.LockTeammateNameDisplay == nil { + s.LockTeammateNameDisplay = NewBool(false) + } +} + +type ClientRequirements struct { + AndroidLatestVersion string `access:"write_restrictable,cloud_restrictable"` + AndroidMinVersion string `access:"write_restrictable,cloud_restrictable"` + IosLatestVersion string `access:"write_restrictable,cloud_restrictable"` + IosMinVersion string `access:"write_restrictable,cloud_restrictable"` +} + +type LdapSettings struct { + // Basic + Enable *bool `access:"authentication_ldap"` + EnableSync *bool `access:"authentication_ldap"` + LdapServer *string `access:"authentication_ldap"` // telemetry: none + LdapPort *int 
`access:"authentication_ldap"` // telemetry: none + ConnectionSecurity *string `access:"authentication_ldap"` + BaseDN *string `access:"authentication_ldap"` // telemetry: none + BindUsername *string `access:"authentication_ldap"` // telemetry: none + BindPassword *string `access:"authentication_ldap"` // telemetry: none + + // Filtering + UserFilter *string `access:"authentication_ldap"` // telemetry: none + GroupFilter *string `access:"authentication_ldap"` + GuestFilter *string `access:"authentication_ldap"` + EnableAdminFilter *bool + AdminFilter *string + + // Group Mapping + GroupDisplayNameAttribute *string `access:"authentication_ldap"` + GroupIdAttribute *string `access:"authentication_ldap"` + + // User Mapping + FirstNameAttribute *string `access:"authentication_ldap"` + LastNameAttribute *string `access:"authentication_ldap"` + EmailAttribute *string `access:"authentication_ldap"` + UsernameAttribute *string `access:"authentication_ldap"` + NicknameAttribute *string `access:"authentication_ldap"` + IdAttribute *string `access:"authentication_ldap"` + PositionAttribute *string `access:"authentication_ldap"` + LoginIdAttribute *string `access:"authentication_ldap"` + PictureAttribute *string `access:"authentication_ldap"` + + // Synchronization + SyncIntervalMinutes *int `access:"authentication_ldap"` + + // Advanced + SkipCertificateVerification *bool `access:"authentication_ldap"` + PublicCertificateFile *string `access:"authentication_ldap"` + PrivateKeyFile *string `access:"authentication_ldap"` + QueryTimeout *int `access:"authentication_ldap"` + MaxPageSize *int `access:"authentication_ldap"` + + // Customization + LoginFieldName *string `access:"authentication_ldap"` + + LoginButtonColor *string `access:"experimental_features"` + LoginButtonBorderColor *string `access:"experimental_features"` + LoginButtonTextColor *string `access:"experimental_features"` + + Trace *bool `access:"authentication_ldap"` // telemetry: none +} + +func (s *LdapSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + // When unset should default to LDAP Enabled + if s.EnableSync == nil { + s.EnableSync = NewBool(*s.Enable) + } + + if s.EnableAdminFilter == nil { + s.EnableAdminFilter = NewBool(false) + } + + if s.LdapServer == nil { + s.LdapServer = NewString("") + } + + if s.LdapPort == nil { + s.LdapPort = NewInt(389) + } + + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") + } + + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") + } + + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") + } + + if s.BaseDN == nil { + s.BaseDN = NewString("") + } + + if s.BindUsername == nil { + s.BindUsername = NewString("") + } + + if s.BindPassword == nil { + s.BindPassword = NewString("") + } + + if s.UserFilter == nil { + s.UserFilter = NewString("") + } + + if s.GuestFilter == nil { + s.GuestFilter = NewString("") + } + + if s.AdminFilter == nil { + s.AdminFilter = NewString("") + } + + if s.GroupFilter == nil { + s.GroupFilter = NewString("") + } + + if s.GroupDisplayNameAttribute == nil { + s.GroupDisplayNameAttribute = NewString(LdapSettingsDefaultGroupDisplayNameAttribute) + } + + if s.GroupIdAttribute == nil { + s.GroupIdAttribute = NewString(LdapSettingsDefaultGroupIdAttribute) + } + + if s.FirstNameAttribute == nil { + s.FirstNameAttribute = NewString(LdapSettingsDefaultFirstNameAttribute) + } + + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(LdapSettingsDefaultLastNameAttribute) + } + 
+ if s.EmailAttribute == nil { + s.EmailAttribute = NewString(LdapSettingsDefaultEmailAttribute) + } + + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(LdapSettingsDefaultUsernameAttribute) + } + + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(LdapSettingsDefaultNicknameAttribute) + } + + if s.IdAttribute == nil { + s.IdAttribute = NewString(LdapSettingsDefaultIdAttribute) + } + + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(LdapSettingsDefaultPositionAttribute) + } + + if s.PictureAttribute == nil { + s.PictureAttribute = NewString(LdapSettingsDefaultPictureAttribute) + } + + // For those upgrading to the version when LoginIdAttribute was added + // they need IdAttribute == LoginIdAttribute not to break + if s.LoginIdAttribute == nil { + s.LoginIdAttribute = s.IdAttribute + } + + if s.SyncIntervalMinutes == nil { + s.SyncIntervalMinutes = NewInt(60) + } + + if s.SkipCertificateVerification == nil { + s.SkipCertificateVerification = NewBool(false) + } + + if s.QueryTimeout == nil { + s.QueryTimeout = NewInt(60) + } + + if s.MaxPageSize == nil { + s.MaxPageSize = NewInt(0) + } + + if s.LoginFieldName == nil { + s.LoginFieldName = NewString(LdapSettingsDefaultLoginFieldName) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") + } + + if s.Trace == nil { + s.Trace = NewBool(false) + } +} + +type ComplianceSettings struct { + Enable *bool `access:"compliance_compliance_monitoring"` + Directory *string `access:"compliance_compliance_monitoring"` // telemetry: none + EnableDaily *bool `access:"compliance_compliance_monitoring"` + BatchSize *int `access:"compliance_compliance_monitoring"` // telemetry: none +} + +func (s *ComplianceSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.Directory == nil { + s.Directory = NewString("./data/") + } + + if s.EnableDaily == nil { + s.EnableDaily = NewBool(false) + } + + if s.BatchSize == nil { + s.BatchSize = NewInt(30000) + } +} + +type LocalizationSettings struct { + DefaultServerLocale *string `access:"site_localization"` + DefaultClientLocale *string `access:"site_localization"` + AvailableLocales *string `access:"site_localization"` +} + +func (s *LocalizationSettings) SetDefaults() { + if s.DefaultServerLocale == nil { + s.DefaultServerLocale = NewString(DefaultLocale) + } + + if s.DefaultClientLocale == nil { + s.DefaultClientLocale = NewString(DefaultLocale) + } + + if s.AvailableLocales == nil { + s.AvailableLocales = NewString("") + } +} + +type SamlSettings struct { + // Basic + Enable *bool `access:"authentication_saml"` + EnableSyncWithLdap *bool `access:"authentication_saml"` + EnableSyncWithLdapIncludeAuth *bool `access:"authentication_saml"` + IgnoreGuestsLdapSync *bool `access:"authentication_saml"` + + Verify *bool `access:"authentication_saml"` + Encrypt *bool `access:"authentication_saml"` + SignRequest *bool `access:"authentication_saml"` + + IdpURL *string `access:"authentication_saml"` // telemetry: none + IdpDescriptorURL *string `access:"authentication_saml"` // telemetry: none + IdpMetadataURL *string `access:"authentication_saml"` // telemetry: none + ServiceProviderIdentifier *string `access:"authentication_saml"` // telemetry: none + AssertionConsumerServiceURL *string `access:"authentication_saml"` // telemetry: none + 
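+	// Signing configuration. Defaults for the two algorithm fields below come from
+	// SamlSettingsDefaultSignatureAlgorithm and SamlSettingsDefaultCanonicalAlgorithm
+	// (applied in SetDefaults further down).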
+ SignatureAlgorithm *string `access:"authentication_saml"` + CanonicalAlgorithm *string `access:"authentication_saml"` + + ScopingIDPProviderId *string `access:"authentication_saml"` + ScopingIDPName *string `access:"authentication_saml"` + + IdpCertificateFile *string `access:"authentication_saml"` // telemetry: none + PublicCertificateFile *string `access:"authentication_saml"` // telemetry: none + PrivateKeyFile *string `access:"authentication_saml"` // telemetry: none + + // User Mapping + IdAttribute *string `access:"authentication_saml"` + GuestAttribute *string `access:"authentication_saml"` + EnableAdminAttribute *bool + AdminAttribute *string + FirstNameAttribute *string `access:"authentication_saml"` + LastNameAttribute *string `access:"authentication_saml"` + EmailAttribute *string `access:"authentication_saml"` + UsernameAttribute *string `access:"authentication_saml"` + NicknameAttribute *string `access:"authentication_saml"` + LocaleAttribute *string `access:"authentication_saml"` + PositionAttribute *string `access:"authentication_saml"` + + LoginButtonText *string `access:"authentication_saml"` + + LoginButtonColor *string `access:"experimental_features"` + LoginButtonBorderColor *string `access:"experimental_features"` + LoginButtonTextColor *string `access:"experimental_features"` +} + +func (s *SamlSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.EnableSyncWithLdap == nil { + s.EnableSyncWithLdap = NewBool(false) + } + + if s.EnableSyncWithLdapIncludeAuth == nil { + s.EnableSyncWithLdapIncludeAuth = NewBool(false) + } + + if s.IgnoreGuestsLdapSync == nil { + s.IgnoreGuestsLdapSync = NewBool(false) + } + + if s.EnableAdminAttribute == nil { + s.EnableAdminAttribute = NewBool(false) + } + + if s.Verify == nil { + s.Verify = NewBool(true) + } + + if s.Encrypt == nil { + s.Encrypt = NewBool(true) + } + + if s.SignRequest == nil { + s.SignRequest = NewBool(false) + } + + if s.SignatureAlgorithm == nil { + s.SignatureAlgorithm = NewString(SamlSettingsDefaultSignatureAlgorithm) + } + + if s.CanonicalAlgorithm == nil { + s.CanonicalAlgorithm = NewString(SamlSettingsDefaultCanonicalAlgorithm) + } + + if s.IdpURL == nil { + s.IdpURL = NewString("") + } + + if s.IdpDescriptorURL == nil { + s.IdpDescriptorURL = NewString("") + } + + if s.ServiceProviderIdentifier == nil { + if s.IdpDescriptorURL != nil { + s.ServiceProviderIdentifier = NewString(*s.IdpDescriptorURL) + } else { + s.ServiceProviderIdentifier = NewString("") + } + } + + if s.IdpMetadataURL == nil { + s.IdpMetadataURL = NewString("") + } + + if s.IdpCertificateFile == nil { + s.IdpCertificateFile = NewString("") + } + + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") + } + + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") + } + + if s.AssertionConsumerServiceURL == nil { + s.AssertionConsumerServiceURL = NewString("") + } + + if s.ScopingIDPProviderId == nil { + s.ScopingIDPProviderId = NewString("") + } + + if s.ScopingIDPName == nil { + s.ScopingIDPName = NewString("") + } + + if s.LoginButtonText == nil || *s.LoginButtonText == "" { + s.LoginButtonText = NewString(UserAuthServiceSamlText) + } + + if s.IdAttribute == nil { + s.IdAttribute = NewString(SamlSettingsDefaultIdAttribute) + } + + if s.GuestAttribute == nil { + s.GuestAttribute = NewString(SamlSettingsDefaultGuestAttribute) + } + if s.AdminAttribute == nil { + s.AdminAttribute = NewString(SamlSettingsDefaultAdminAttribute) + } + if s.FirstNameAttribute == nil { + 
s.FirstNameAttribute = NewString(SamlSettingsDefaultFirstNameAttribute) + } + + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(SamlSettingsDefaultLastNameAttribute) + } + + if s.EmailAttribute == nil { + s.EmailAttribute = NewString(SamlSettingsDefaultEmailAttribute) + } + + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(SamlSettingsDefaultUsernameAttribute) + } + + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(SamlSettingsDefaultNicknameAttribute) + } + + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(SamlSettingsDefaultPositionAttribute) + } + + if s.LocaleAttribute == nil { + s.LocaleAttribute = NewString(SamlSettingsDefaultLocaleAttribute) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#34a28b") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#ffffff") + } +} + +type NativeAppSettings struct { + AppCustomURLSchemes []string `access:"site_customization,write_restrictable,cloud_restrictable"` // telemetry: none + AppDownloadLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + AndroidAppDownloadLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` + IosAppDownloadLink *string `access:"site_customization,write_restrictable,cloud_restrictable"` +} + +func (s *NativeAppSettings) SetDefaults() { + if s.AppDownloadLink == nil { + s.AppDownloadLink = NewString(NativeappSettingsDefaultAppDownloadLink) + } + + if s.AndroidAppDownloadLink == nil { + s.AndroidAppDownloadLink = NewString(NativeappSettingsDefaultAndroidAppDownloadLink) + } + + if s.IosAppDownloadLink == nil { + s.IosAppDownloadLink = NewString(NativeappSettingsDefaultIosAppDownloadLink) + } + + if s.AppCustomURLSchemes == nil { + s.AppCustomURLSchemes = GetDefaultAppCustomURLSchemes() + } +} + +type ElasticsearchSettings struct { + ConnectionURL *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + Username *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + Password *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + EnableIndexing *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + EnableSearching *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + EnableAutocomplete *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + Sniff *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + PostIndexReplicas *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + PostIndexShards *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + ChannelIndexReplicas *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + ChannelIndexShards *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + UserIndexReplicas *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + UserIndexShards *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + AggregatePostsAfterDays *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` // telemetry: none + PostsAggregatorJobStartTime *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` // telemetry: none + IndexPrefix 
*string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + LiveIndexingBatchSize *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + BulkIndexingTimeWindowSeconds *int `json:",omitempty"` // telemetry: none + BatchSize *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + RequestTimeoutSeconds *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + SkipTLSVerification *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` + Trace *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` +} + +func (s *ElasticsearchSettings) SetDefaults() { + if s.ConnectionURL == nil { + s.ConnectionURL = NewString(ElasticsearchSettingsDefaultConnectionURL) + } + + if s.Username == nil { + s.Username = NewString(ElasticsearchSettingsDefaultUsername) + } + + if s.Password == nil { + s.Password = NewString(ElasticsearchSettingsDefaultPassword) + } + + if s.EnableIndexing == nil { + s.EnableIndexing = NewBool(false) + } + + if s.EnableSearching == nil { + s.EnableSearching = NewBool(false) + } + + if s.EnableAutocomplete == nil { + s.EnableAutocomplete = NewBool(false) + } + + if s.Sniff == nil { + s.Sniff = NewBool(true) + } + + if s.PostIndexReplicas == nil { + s.PostIndexReplicas = NewInt(ElasticsearchSettingsDefaultPostIndexReplicas) + } + + if s.PostIndexShards == nil { + s.PostIndexShards = NewInt(ElasticsearchSettingsDefaultPostIndexShards) + } + + if s.ChannelIndexReplicas == nil { + s.ChannelIndexReplicas = NewInt(ElasticsearchSettingsDefaultChannelIndexReplicas) + } + + if s.ChannelIndexShards == nil { + s.ChannelIndexShards = NewInt(ElasticsearchSettingsDefaultChannelIndexShards) + } + + if s.UserIndexReplicas == nil { + s.UserIndexReplicas = NewInt(ElasticsearchSettingsDefaultUserIndexReplicas) + } + + if s.UserIndexShards == nil { + s.UserIndexShards = NewInt(ElasticsearchSettingsDefaultUserIndexShards) + } + + if s.AggregatePostsAfterDays == nil { + s.AggregatePostsAfterDays = NewInt(ElasticsearchSettingsDefaultAggregatePostsAfterDays) + } + + if s.PostsAggregatorJobStartTime == nil { + s.PostsAggregatorJobStartTime = NewString(ElasticsearchSettingsDefaultPostsAggregatorJobStartTime) + } + + if s.IndexPrefix == nil { + s.IndexPrefix = NewString(ElasticsearchSettingsDefaultIndexPrefix) + } + + if s.LiveIndexingBatchSize == nil { + s.LiveIndexingBatchSize = NewInt(ElasticsearchSettingsDefaultLiveIndexingBatchSize) + } + + if s.BatchSize == nil { + s.BatchSize = NewInt(ElasticsearchSettingsDefaultBatchSize) + } + + if s.RequestTimeoutSeconds == nil { + s.RequestTimeoutSeconds = NewInt(ElasticsearchSettingsDefaultRequestTimeoutSeconds) + } + + if s.SkipTLSVerification == nil { + s.SkipTLSVerification = NewBool(false) + } + + if s.Trace == nil { + s.Trace = NewString("") + } +} + +type BleveSettings struct { + IndexDir *string `access:"experimental_bleve"` // telemetry: none + EnableIndexing *bool `access:"experimental_bleve"` + EnableSearching *bool `access:"experimental_bleve"` + EnableAutocomplete *bool `access:"experimental_bleve"` + BulkIndexingTimeWindowSeconds *int `json:",omitempty"` // telemetry: none + BatchSize *int `access:"experimental_bleve"` +} + +func (bs *BleveSettings) SetDefaults() { + if bs.IndexDir == nil { + bs.IndexDir = NewString(BleveSettingsDefaultIndexDir) + } + + if bs.EnableIndexing == nil { + bs.EnableIndexing = NewBool(false) + } + + if bs.EnableSearching == nil { + bs.EnableSearching = NewBool(false) + } + + if 
bs.EnableAutocomplete == nil { + bs.EnableAutocomplete = NewBool(false) + } + + if bs.BatchSize == nil { + bs.BatchSize = NewInt(BleveSettingsDefaultBatchSize) + } +} + +type DataRetentionSettings struct { + EnableMessageDeletion *bool `access:"compliance_data_retention_policy"` + EnableFileDeletion *bool `access:"compliance_data_retention_policy"` + EnableBoardsDeletion *bool `access:"compliance_data_retention_policy"` + MessageRetentionDays *int `access:"compliance_data_retention_policy"` + FileRetentionDays *int `access:"compliance_data_retention_policy"` + BoardsRetentionDays *int `access:"compliance_data_retention_policy"` + DeletionJobStartTime *string `access:"compliance_data_retention_policy"` + BatchSize *int `access:"compliance_data_retention_policy"` +} + +func (s *DataRetentionSettings) SetDefaults() { + if s.EnableMessageDeletion == nil { + s.EnableMessageDeletion = NewBool(false) + } + + if s.EnableFileDeletion == nil { + s.EnableFileDeletion = NewBool(false) + } + + if s.EnableBoardsDeletion == nil { + s.EnableBoardsDeletion = NewBool(false) + } + + if s.MessageRetentionDays == nil { + s.MessageRetentionDays = NewInt(DataRetentionSettingsDefaultMessageRetentionDays) + } + + if s.FileRetentionDays == nil { + s.FileRetentionDays = NewInt(DataRetentionSettingsDefaultFileRetentionDays) + } + + if s.BoardsRetentionDays == nil { + s.BoardsRetentionDays = NewInt(DataRetentionSettingsDefaultBoardsRetentionDays) + } + + if s.DeletionJobStartTime == nil { + s.DeletionJobStartTime = NewString(DataRetentionSettingsDefaultDeletionJobStartTime) + } + + if s.BatchSize == nil { + s.BatchSize = NewInt(DataRetentionSettingsDefaultBatchSize) + } +} + +type JobSettings struct { + RunJobs *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none + RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none + CleanupJobsThresholdDays *int `access:"write_restrictable,cloud_restrictable"` + CleanupConfigThresholdDays *int `access:"write_restrictable,cloud_restrictable"` +} + +func (s *JobSettings) SetDefaults() { + if s.RunJobs == nil { + s.RunJobs = NewBool(true) + } + + if s.RunScheduler == nil { + s.RunScheduler = NewBool(true) + } + + if s.CleanupJobsThresholdDays == nil { + s.CleanupJobsThresholdDays = NewInt(-1) + } + + if s.CleanupConfigThresholdDays == nil { + s.CleanupConfigThresholdDays = NewInt(-1) + } +} + +type CloudSettings struct { + CWSURL *string `access:"write_restrictable"` + CWSAPIURL *string `access:"write_restrictable"` +} + +func (s *CloudSettings) SetDefaults() { + if s.CWSURL == nil { + s.CWSURL = NewString(CloudSettingsDefaultCwsURL) + } + if s.CWSAPIURL == nil { + s.CWSAPIURL = NewString(CloudSettingsDefaultCwsAPIURL) + } +} + +type PluginState struct { + Enable bool +} + +type PluginSettings struct { + Enable *bool `access:"plugins,write_restrictable"` + EnableUploads *bool `access:"plugins,write_restrictable,cloud_restrictable"` + AllowInsecureDownloadURL *bool `access:"plugins,write_restrictable,cloud_restrictable"` + EnableHealthCheck *bool `access:"plugins,write_restrictable,cloud_restrictable"` + Directory *string `access:"plugins,write_restrictable,cloud_restrictable"` // telemetry: none + ClientDirectory *string `access:"plugins,write_restrictable,cloud_restrictable"` // telemetry: none + Plugins map[string]map[string]interface{} `access:"plugins"` // telemetry: none + PluginStates map[string]*PluginState `access:"plugins"` // telemetry: none + EnableMarketplace *bool 
`access:"plugins,write_restrictable,cloud_restrictable"` + EnableRemoteMarketplace *bool `access:"plugins,write_restrictable,cloud_restrictable"` + AutomaticPrepackagedPlugins *bool `access:"plugins,write_restrictable,cloud_restrictable"` + RequirePluginSignature *bool `access:"plugins,write_restrictable,cloud_restrictable"` + MarketplaceURL *string `access:"plugins,write_restrictable,cloud_restrictable"` + SignaturePublicKeyFiles []string `access:"plugins,write_restrictable,cloud_restrictable"` + ChimeraOAuthProxyURL *string `access:"plugins,write_restrictable,cloud_restrictable"` +} + +func (s *PluginSettings) SetDefaults(ls LogSettings) { + if s.Enable == nil { + s.Enable = NewBool(true) + } + + if s.EnableUploads == nil { + s.EnableUploads = NewBool(false) + } + + if s.AllowInsecureDownloadURL == nil { + s.AllowInsecureDownloadURL = NewBool(false) + } + + if s.EnableHealthCheck == nil { + s.EnableHealthCheck = NewBool(true) + } + + if s.Directory == nil || *s.Directory == "" { + s.Directory = NewString(PluginSettingsDefaultDirectory) + } + + if s.ClientDirectory == nil || *s.ClientDirectory == "" { + s.ClientDirectory = NewString(PluginSettingsDefaultClientDirectory) + } + + if s.Plugins == nil { + s.Plugins = make(map[string]map[string]interface{}) + } + + if s.PluginStates == nil { + s.PluginStates = make(map[string]*PluginState) + } + + if s.PluginStates["com.mattermost.nps"] == nil { + // Enable the NPS plugin by default if diagnostics are enabled + s.PluginStates["com.mattermost.nps"] = &PluginState{Enable: ls.EnableDiagnostics == nil || *ls.EnableDiagnostics} + } + + if s.PluginStates["playbooks"] == nil { + // Enable the playbooks plugin by default + s.PluginStates["playbooks"] = &PluginState{Enable: true} + } + + if s.PluginStates["com.mattermost.plugin-channel-export"] == nil && BuildEnterpriseReady == "true" { + // Enable the channel export plugin by default + s.PluginStates["com.mattermost.plugin-channel-export"] = &PluginState{Enable: true} + } + + if s.PluginStates["focalboard"] == nil { + // Enable the focalboard plugin by default + s.PluginStates["focalboard"] = &PluginState{Enable: true} + } + + if s.PluginStates["com.mattermost.apps"] == nil { + // Enable the Apps plugin by default + s.PluginStates["com.mattermost.apps"] = &PluginState{Enable: true} + } + + if s.EnableMarketplace == nil { + s.EnableMarketplace = NewBool(PluginSettingsDefaultEnableMarketplace) + } + + if s.EnableRemoteMarketplace == nil { + s.EnableRemoteMarketplace = NewBool(true) + } + + if s.AutomaticPrepackagedPlugins == nil { + s.AutomaticPrepackagedPlugins = NewBool(true) + } + + if s.MarketplaceURL == nil || *s.MarketplaceURL == "" || *s.MarketplaceURL == PluginSettingsOldMarketplaceURL { + s.MarketplaceURL = NewString(PluginSettingsDefaultMarketplaceURL) + } + + if s.RequirePluginSignature == nil { + s.RequirePluginSignature = NewBool(false) + } + + if s.SignaturePublicKeyFiles == nil { + s.SignaturePublicKeyFiles = []string{} + } + + if s.ChimeraOAuthProxyURL == nil { + s.ChimeraOAuthProxyURL = NewString("") + } +} + +type GlobalRelayMessageExportSettings struct { + CustomerType *string `access:"compliance_compliance_export"` // must be either A9 or A10, dictates SMTP server url + SMTPUsername *string `access:"compliance_compliance_export"` + SMTPPassword *string `access:"compliance_compliance_export"` + EmailAddress *string `access:"compliance_compliance_export"` // the address to send messages to + SMTPServerTimeout *int `access:"compliance_compliance_export"` +} + +func (s 
*GlobalRelayMessageExportSettings) SetDefaults() { + if s.CustomerType == nil { + s.CustomerType = NewString(GlobalrelayCustomerTypeA9) + } + if s.SMTPUsername == nil { + s.SMTPUsername = NewString("") + } + if s.SMTPPassword == nil { + s.SMTPPassword = NewString("") + } + if s.EmailAddress == nil { + s.EmailAddress = NewString("") + } + if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { + s.SMTPServerTimeout = NewInt(1800) + } +} + +type MessageExportSettings struct { + EnableExport *bool `access:"compliance_compliance_export"` + ExportFormat *string `access:"compliance_compliance_export"` + DailyRunTime *string `access:"compliance_compliance_export"` + ExportFromTimestamp *int64 `access:"compliance_compliance_export"` + BatchSize *int `access:"compliance_compliance_export"` + DownloadExportResults *bool `access:"compliance_compliance_export"` + + // formatter-specific settings - these are only expected to be non-nil if ExportFormat is set to the associated format + GlobalRelaySettings *GlobalRelayMessageExportSettings `access:"compliance_compliance_export"` +} + +func (s *MessageExportSettings) SetDefaults() { + if s.EnableExport == nil { + s.EnableExport = NewBool(false) + } + + if s.DownloadExportResults == nil { + s.DownloadExportResults = NewBool(false) + } + + if s.ExportFormat == nil { + s.ExportFormat = NewString(ComplianceExportTypeActiance) + } + + if s.DailyRunTime == nil { + s.DailyRunTime = NewString("01:00") + } + + if s.ExportFromTimestamp == nil { + s.ExportFromTimestamp = NewInt64(0) + } + + if s.BatchSize == nil { + s.BatchSize = NewInt(10000) + } + + if s.GlobalRelaySettings == nil { + s.GlobalRelaySettings = &GlobalRelayMessageExportSettings{} + } + s.GlobalRelaySettings.SetDefaults() +} + +type DisplaySettings struct { + CustomURLSchemes []string `access:"site_customization"` + ExperimentalTimezone *bool `access:"experimental_features"` +} + +func (s *DisplaySettings) SetDefaults() { + if s.CustomURLSchemes == nil { + customURLSchemes := []string{} + s.CustomURLSchemes = customURLSchemes + } + + if s.ExperimentalTimezone == nil { + s.ExperimentalTimezone = NewBool(true) + } +} + +type GuestAccountsSettings struct { + Enable *bool `access:"authentication_guest_access"` + AllowEmailAccounts *bool `access:"authentication_guest_access"` + EnforceMultifactorAuthentication *bool `access:"authentication_guest_access"` + RestrictCreationToDomains *string `access:"authentication_guest_access"` +} + +func (s *GuestAccountsSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.AllowEmailAccounts == nil { + s.AllowEmailAccounts = NewBool(true) + } + + if s.EnforceMultifactorAuthentication == nil { + s.EnforceMultifactorAuthentication = NewBool(false) + } + + if s.RestrictCreationToDomains == nil { + s.RestrictCreationToDomains = NewString("") + } +} + +type ImageProxySettings struct { + Enable *bool `access:"environment_image_proxy"` + ImageProxyType *string `access:"environment_image_proxy"` + RemoteImageProxyURL *string `access:"environment_image_proxy"` + RemoteImageProxyOptions *string `access:"environment_image_proxy"` +} + +func (s *ImageProxySettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.ImageProxyType == nil { + s.ImageProxyType = NewString(ImageProxyTypeLocal) + } + + if s.RemoteImageProxyURL == nil { + s.RemoteImageProxyURL = NewString("") + } + + if s.RemoteImageProxyOptions == nil { + s.RemoteImageProxyOptions = NewString("") + } +} + +// ImportSettings defines configuration 
settings for file imports.
+type ImportSettings struct {
+	// The directory in which to store the imported files.
+	Directory *string
+	// The number of days to retain the imported files before deleting them.
+	RetentionDays *int
+}
+
+func (s *ImportSettings) isValid() *AppError {
+	if *s.Directory == "" {
+		return NewAppError("Config.IsValid", "model.config.is_valid.import.directory.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if *s.RetentionDays <= 0 {
+		return NewAppError("Config.IsValid", "model.config.is_valid.import.retention_days_too_low.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	return nil
+}
+
+// SetDefaults applies the default settings to the struct.
+func (s *ImportSettings) SetDefaults() {
+	if s.Directory == nil || *s.Directory == "" {
+		s.Directory = NewString(ImportSettingsDefaultDirectory)
+	}
+
+	if s.RetentionDays == nil {
+		s.RetentionDays = NewInt(ImportSettingsDefaultRetentionDays)
+	}
+}
+
+// ExportSettings defines configuration settings for file exports.
+type ExportSettings struct {
+	// The directory in which to store the exported files.
+	Directory *string // telemetry: none
+	// The number of days to retain the exported files before deleting them.
+	RetentionDays *int
+}
+
+func (s *ExportSettings) isValid() *AppError {
+	if *s.Directory == "" {
+		return NewAppError("Config.IsValid", "model.config.is_valid.export.directory.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if *s.RetentionDays <= 0 {
+		return NewAppError("Config.IsValid", "model.config.is_valid.export.retention_days_too_low.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	return nil
+}
+
+// SetDefaults applies the default settings to the struct.
+func (s *ExportSettings) SetDefaults() {
+	if s.Directory == nil || *s.Directory == "" {
+		s.Directory = NewString(ExportSettingsDefaultDirectory)
+	}
+
+	if s.RetentionDays == nil {
+		s.RetentionDays = NewInt(ExportSettingsDefaultRetentionDays)
+	}
+}
+
+type ConfigFunc func() *Config
+
+const ConfigAccessTagType = "access"
+const ConfigAccessTagWriteRestrictable = "write_restrictable"
+const ConfigAccessTagCloudRestrictable = "cloud_restrictable"
+
+// Allows read access if any PermissionSysconsoleRead* is allowed
+const ConfigAccessTagAnySysConsoleRead = "*_read"
+
+// Config fields support the 'access' tag with the following values corresponding to the suffix of the associated
+// PermissionSysconsole* permission Id: 'about', 'reporting', 'user_management_users',
+// 'user_management_groups', 'user_management_teams', 'user_management_channels',
+// 'user_management_permissions', 'environment_web_server', 'environment_database', 'environment_elasticsearch',
+// 'environment_file_storage', 'environment_image_proxy', 'environment_smtp', 'environment_push_notification_server',
+// 'environment_high_availability', 'environment_rate_limiting', 'environment_logging', 'environment_session_lengths',
+// 'environment_performance_monitoring', 'environment_developer', 'site', 'authentication', 'plugins',
+// 'integrations', 'compliance', and 'experimental'. They grant read and/or write access to the config field
+// to roles without PermissionManageSystem.
+//
+// The 'access' tag value '*_read' grants read access if the role holds any PermissionSysconsoleRead* permission.
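+//
+// For example, the FeatureFlags field on Config (below) uses this tag:
+//
+//	FeatureFlags *FeatureFlags `access:"*_read" json:",omitempty"`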
+//
+// By default config values can be written with PermissionManageSystem, but if ExperimentalSettings.RestrictSystemAdmin is true
+// and the access tag contains the value 'write_restrictable', then even PermissionManageSystem does not grant write access.
+//
+// PermissionManageSystem always grants read access.
+//
+// Config values with the access tag 'cloud_restrictable' are filtered out when running in a cloud-licensed
+// environment with ExperimentalSettings.RestrictSystemAdmin set to true.
+//
+// Example:
+// type HairSettings struct {
+//     // Colour is writeable with either PermissionSysconsoleWriteReporting or PermissionSysconsoleWriteUserManagementGroups.
+//     // It is readable by PermissionSysconsoleReadReporting and PermissionSysconsoleReadUserManagementGroups permissions.
+//     // PermissionManageSystem grants read and write access.
+//     Colour string `access:"reporting,user_management_groups"`
+//
+//     // Length is only readable and writable via PermissionManageSystem.
+//     Length string
+//
+//     // Product is only writeable by PermissionManageSystem if ExperimentalSettings.RestrictSystemAdmin is false.
+//     // PermissionManageSystem can always read the value.
+//     Product bool `access:"write_restrictable"`
+// }
+type Config struct {
+	ServiceSettings           ServiceSettings
+	TeamSettings              TeamSettings
+	ClientRequirements        ClientRequirements
+	SqlSettings               SqlSettings
+	LogSettings               LogSettings
+	ExperimentalAuditSettings ExperimentalAuditSettings
+	NotificationLogSettings   NotificationLogSettings
+	PasswordSettings          PasswordSettings
+	FileSettings              FileSettings
+	EmailSettings             EmailSettings
+	RateLimitSettings         RateLimitSettings
+	PrivacySettings           PrivacySettings
+	SupportSettings           SupportSettings
+	AnnouncementSettings      AnnouncementSettings
+	ThemeSettings             ThemeSettings
+	GitLabSettings            SSOSettings
+	GoogleSettings            SSOSettings
+	Office365Settings         Office365Settings
+	OpenIdSettings            SSOSettings
+	LdapSettings              LdapSettings
+	ComplianceSettings        ComplianceSettings
+	LocalizationSettings      LocalizationSettings
+	SamlSettings              SamlSettings
+	NativeAppSettings         NativeAppSettings
+	ClusterSettings           ClusterSettings
+	MetricsSettings           MetricsSettings
+	ExperimentalSettings      ExperimentalSettings
+	AnalyticsSettings         AnalyticsSettings
+	ElasticsearchSettings     ElasticsearchSettings
+	BleveSettings             BleveSettings
+	DataRetentionSettings     DataRetentionSettings
+	MessageExportSettings     MessageExportSettings
+	JobSettings               JobSettings
+	PluginSettings            PluginSettings
+	DisplaySettings           DisplaySettings
+	GuestAccountsSettings     GuestAccountsSettings
+	ImageProxySettings        ImageProxySettings
+	CloudSettings             CloudSettings // telemetry: none
+	FeatureFlags              *FeatureFlags `access:"*_read" json:",omitempty"`
+	ImportSettings            ImportSettings // telemetry: none
+	ExportSettings            ExportSettings
+}
+
+func (o *Config) Clone() *Config {
+	buf, err := json.Marshal(o)
+	if err != nil {
+		panic(err)
+	}
+	var ret Config
+	err = json.Unmarshal(buf, &ret)
+	if err != nil {
+		panic(err)
+	}
+	return &ret
+}
+
+func (o *Config) ToJSONFiltered(tagType, tagValue string) ([]byte, error) {
+	filteredConfigMap := structToMapFilteredByTag(*o, tagType, tagValue)
+	for key, value := range filteredConfigMap {
+		v, ok := value.(map[string]interface{})
+		if ok && len(v) == 0 {
+			delete(filteredConfigMap, key)
+		}
+	}
+	return json.Marshal(filteredConfigMap)
+}
+
+func (o *Config) GetSSOService(service string) *SSOSettings {
+	switch service {
+	case ServiceGitlab:
+		return &o.GitLabSettings
+	case ServiceGoogle:
+		return &o.GoogleSettings
+
+	case ServiceOffice365:
+		return o.Office365Settings.SSOSettings()
+	case ServiceOpenid:
+		return &o.OpenIdSettings
+	}
+
+	return nil
+}
+
+func ConfigFromJSON(data io.Reader) *Config {
+	var o *Config
+	json.NewDecoder(data).Decode(&o)
+	return o
+}
+
+// isUpdate detects a pre-existing config based on whether SiteURL has been set
+func (o *Config) isUpdate() bool {
+	return o.ServiceSettings.SiteURL != nil
+}
+
+func (o *Config) SetDefaults() {
+	isUpdate := o.isUpdate()
+
+	o.LdapSettings.SetDefaults()
+	o.SamlSettings.SetDefaults()
+
+	if o.TeamSettings.TeammateNameDisplay == nil {
+		o.TeamSettings.TeammateNameDisplay = NewString(ShowUsername)
+
+		if *o.SamlSettings.Enable || *o.LdapSettings.Enable {
+			*o.TeamSettings.TeammateNameDisplay = ShowFullName
+		}
+	}
+
+	o.SqlSettings.SetDefaults(isUpdate)
+	o.FileSettings.SetDefaults(isUpdate)
+	o.EmailSettings.SetDefaults(isUpdate)
+	o.PrivacySettings.setDefaults()
+	o.Office365Settings.setDefaults()
+	o.GitLabSettings.setDefaults("", "", "", "", "")
+	o.GoogleSettings.setDefaults(GoogleSettingsDefaultScope, GoogleSettingsDefaultAuthEndpoint, GoogleSettingsDefaultTokenEndpoint, GoogleSettingsDefaultUserAPIEndpoint, "")
+	o.OpenIdSettings.setDefaults(OpenidSettingsDefaultScope, "", "", "", "#145DBF")
+	o.ServiceSettings.SetDefaults(isUpdate)
+	o.PasswordSettings.SetDefaults()
+	o.TeamSettings.SetDefaults()
+	o.MetricsSettings.SetDefaults()
+	o.ExperimentalSettings.SetDefaults()
+	o.SupportSettings.SetDefaults()
+	o.AnnouncementSettings.SetDefaults()
+	o.ThemeSettings.SetDefaults()
+	o.ClusterSettings.SetDefaults()
+	o.PluginSettings.SetDefaults(o.LogSettings)
+	o.AnalyticsSettings.SetDefaults()
+	o.ComplianceSettings.SetDefaults()
+	o.LocalizationSettings.SetDefaults()
+	o.ElasticsearchSettings.SetDefaults()
+	o.BleveSettings.SetDefaults()
+	o.NativeAppSettings.SetDefaults()
+	o.DataRetentionSettings.SetDefaults()
+	o.RateLimitSettings.SetDefaults()
+	o.LogSettings.SetDefaults()
+	o.ExperimentalAuditSettings.SetDefaults()
+	o.NotificationLogSettings.SetDefaults()
+	o.JobSettings.SetDefaults()
+	o.MessageExportSettings.SetDefaults()
+	o.DisplaySettings.SetDefaults()
+	o.GuestAccountsSettings.SetDefaults()
+	o.ImageProxySettings.SetDefaults()
+	o.CloudSettings.SetDefaults()
+	if o.FeatureFlags == nil {
+		o.FeatureFlags = &FeatureFlags{}
+		o.FeatureFlags.SetDefaults()
+	}
+	o.ImportSettings.SetDefaults()
+	o.ExportSettings.SetDefaults()
+}
+
+func (o *Config) IsValid() *AppError {
+	if *o.ServiceSettings.SiteURL == "" && *o.EmailSettings.EnableEmailBatching {
+		return NewAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching {
+		return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if *o.ServiceSettings.SiteURL == "" && *o.ServiceSettings.AllowCookiesForSubdomains {
+		return NewAppError("Config.IsValid", "model.config.is_valid.allow_cookies_for_subdomains.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if err := o.TeamSettings.isValid(); err != nil {
+		return err
+	}
+
+	if err := o.SqlSettings.isValid(); err != nil {
+		return err
+	}
+
+	if err := o.FileSettings.isValid(); err != nil {
+		return err
+	}
+
+	if err := o.EmailSettings.isValid(); err != nil {
+		return err
+	}
+
+	if err := o.LdapSettings.isValid(); err != nil {
+		return err
+	}
+
+	if err := o.SamlSettings.isValid(); err
!= nil { + return err + } + + if *o.PasswordSettings.MinimumLength < PasswordMinimumLength || *o.PasswordSettings.MinimumLength > PasswordMaximumLength { + return NewAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PasswordMinimumLength, "MaxLength": PasswordMaximumLength}, "", http.StatusBadRequest) + } + + if err := o.RateLimitSettings.isValid(); err != nil { + return err + } + + if err := o.ServiceSettings.isValid(); err != nil { + return err + } + + if err := o.ElasticsearchSettings.isValid(); err != nil { + return err + } + + if err := o.BleveSettings.isValid(); err != nil { + return err + } + + if err := o.DataRetentionSettings.isValid(); err != nil { + return err + } + + if err := o.LocalizationSettings.isValid(); err != nil { + return err + } + + if err := o.MessageExportSettings.isValid(); err != nil { + return err + } + + if err := o.DisplaySettings.isValid(); err != nil { + return err + } + + if err := o.ImageProxySettings.isValid(); err != nil { + return err + } + + if err := o.ImportSettings.isValid(); err != nil { + return err + } + return nil +} + +func (s *TeamSettings) isValid() *AppError { + if *s.MaxUsersPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxChannelsPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxNotificationsPerChannel <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.RestrictDirectMessage == DirectMessageAny || *s.RestrictDirectMessage == DirectMessageTeam) { + return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.TeammateNameDisplay == ShowFullName || *s.TeammateNameDisplay == ShowNicknameFullName || *s.TeammateNameDisplay == ShowUsername) { + return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest) + } + + if len(*s.SiteName) > SitenameMaxLength { + return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SitenameMaxLength}, "", http.StatusBadRequest) + } + + return nil +} + +func (s *SqlSettings) isValid() *AppError { + if *s.AtRestEncryptKey != "" && len(*s.AtRestEncryptKey) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.DriverName == DatabaseDriverMysql || *s.DriverName == DatabaseDriverPostgres) { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxIdleConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ConnMaxLifetimeMilliseconds < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_conn_max_lifetime_milliseconds.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ConnMaxIdleTimeMilliseconds < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_conn_max_idle_time_milliseconds.app_error", nil, "", http.StatusBadRequest) + } + + if *s.QueryTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", 
http.StatusBadRequest) + } + + if *s.DataSource == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxOpenConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *FileSettings) isValid() *AppError { + if *s.MaxFileSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.DriverName == ImageDriverLocal || *s.DriverName == ImageDriverS3) { + return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest) + } + + if *s.PublicLinkSalt != "" && len(*s.PublicLinkSalt) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest) + } + + if *s.Directory == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.directory.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *EmailSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == ConnSecurityNone || *s.ConnectionSecurity == ConnSecurityTLS || *s.ConnectionSecurity == ConnSecurityStarttls || *s.ConnectionSecurity == ConnSecurityPlain) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest) + } + + if *s.EmailBatchingBufferSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest) + } + + if *s.EmailBatchingInterval < 30 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.EmailNotificationContentsType == EmailNotificationContentsFull || *s.EmailNotificationContentsType == EmailNotificationContentsGeneric) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *RateLimitSettings) isValid() *AppError { + if *s.MemoryStoreSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest) + } + + if *s.PerSec <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxBurst <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *LdapSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == ConnSecurityNone || *s.ConnectionSecurity == ConnSecurityTLS || *s.ConnectionSecurity == ConnSecurityStarttls) { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest) + } + + if *s.SyncIntervalMinutes <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaxPageSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest) + } + + if *s.Enable { + if *s.LdapServer == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest) + } + + if *s.BaseDN == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", 
http.StatusBadRequest) + } + + if *s.EmailAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest) + } + + if *s.UsernameAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest) + } + + if *s.IdAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest) + } + + if *s.LoginIdAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_login_id", nil, "", http.StatusBadRequest) + } + + if *s.UserFilter != "" { + if _, err := ldap.CompileFilter(*s.UserFilter); err != nil { + return NewAppError("ValidateFilter", "ent.ldap.validate_filter.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + + if *s.GuestFilter != "" { + if _, err := ldap.CompileFilter(*s.GuestFilter); err != nil { + return NewAppError("LdapSettings.isValid", "ent.ldap.validate_guest_filter.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + + if *s.AdminFilter != "" { + if _, err := ldap.CompileFilter(*s.AdminFilter); err != nil { + return NewAppError("LdapSettings.isValid", "ent.ldap.validate_admin_filter.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + } + + return nil +} + +func (s *SamlSettings) isValid() *AppError { + if *s.Enable { + if *s.IdpURL == "" || !IsValidHTTPURL(*s.IdpURL) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest) + } + + if *s.IdpDescriptorURL == "" || !IsValidHTTPURL(*s.IdpDescriptorURL) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest) + } + + if *s.IdpCertificateFile == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest) + } + + if *s.EmailAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if *s.UsernameAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ServiceProviderIdentifier == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_spidentifier_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if *s.Verify { + if *s.AssertionConsumerServiceURL == "" || !IsValidHTTPURL(*s.AssertionConsumerServiceURL) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest) + } + } + + if *s.Encrypt { + if *s.PrivateKeyFile == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest) + } + + if *s.PublicCertificateFile == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest) + } + } + + if *s.EmailAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if !(*s.SignatureAlgorithm == SamlSettingsSignatureAlgorithmSha1 || *s.SignatureAlgorithm == SamlSettingsSignatureAlgorithmSha256 || *s.SignatureAlgorithm == SamlSettingsSignatureAlgorithmSha512) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_signature_algorithm.app_error", nil, "", http.StatusBadRequest) + } + 
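+		// The CanonicalAlgorithm check below mirrors the SignatureAlgorithm check
+		// above; only the two supported C14N variants are accepted. Further down,
+		// GuestAttribute and AdminAttribute must each be a mapping of the form
+		// "Attribute=Value" (exactly one '='), which is what the Contains/Split
+		// pair enforces.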
if !(*s.CanonicalAlgorithm == SamlSettingsCanonicalAlgorithmC14n || *s.CanonicalAlgorithm == SamlSettingsCanonicalAlgorithmC14n11) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_canonical_algorithm.app_error", nil, "", http.StatusBadRequest) + } + + if *s.GuestAttribute != "" { + if !(strings.Contains(*s.GuestAttribute, "=")) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) + } + if len(strings.Split(*s.GuestAttribute, "=")) != 2 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) + } + } + + if *s.AdminAttribute != "" { + if !(strings.Contains(*s.AdminAttribute, "=")) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) + } + if len(strings.Split(*s.AdminAttribute, "=")) != 2 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) + } + } + } + + return nil +} + +func (s *ServiceSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == ConnSecurityNone || *s.ConnectionSecurity == ConnSecurityTLS) { + return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ConnectionSecurity == ConnSecurityTLS && !*s.UseLetsEncrypt { + appErr := NewAppError("Config.IsValid", "model.config.is_valid.tls_cert_file_missing.app_error", nil, "", http.StatusBadRequest) + + if *s.TLSCertFile == "" { + return appErr + } else if _, err := os.Stat(*s.TLSCertFile); os.IsNotExist(err) { + return appErr + } + + appErr = NewAppError("Config.IsValid", "model.config.is_valid.tls_key_file_missing.app_error", nil, "", http.StatusBadRequest) + + if *s.TLSKeyFile == "" { + return appErr + } else if _, err := os.Stat(*s.TLSKeyFile); os.IsNotExist(err) { + return appErr + } + } + + if len(s.TLSOverwriteCiphers) > 0 { + for _, cipher := range s.TLSOverwriteCiphers { + if _, ok := ServerTLSSupportedCiphers[cipher]; !ok { + return NewAppError("Config.IsValid", "model.config.is_valid.tls_overwrite_cipher.app_error", map[string]interface{}{"name": cipher}, "", http.StatusBadRequest) + } + } + } + + if *s.ReadTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest) + } + + if *s.WriteTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest) + } + + if *s.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { + return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest) + } + + if *s.MaximumLoginAttempts <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest) + } + + if *s.SiteURL != "" { + if _, err := url.ParseRequestURI(*s.SiteURL); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + + if *s.WebsocketURL != "" { + if _, err := url.ParseRequestURI(*s.WebsocketURL); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + + host, port, _ := net.SplitHostPort(*s.ListenAddress) + var isValidHost bool + if host == "" { + isValidHost = true + } else { + 
isValidHost = (net.ParseIP(host) != nil) || isDomainName(host) + } + portInt, err := strconv.Atoi(port) + if err != nil || !isValidHost || portInt < 0 || portInt > math.MaxUint16 { + return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ExperimentalGroupUnreadChannels != GroupUnreadChannelsDisabled && + *s.ExperimentalGroupUnreadChannels != GroupUnreadChannelsDefaultOn && + *s.ExperimentalGroupUnreadChannels != GroupUnreadChannelsDefaultOff { + return NewAppError("Config.IsValid", "model.config.is_valid.group_unread_channels.app_error", nil, "", http.StatusBadRequest) + } + + if *s.CollapsedThreads != CollapsedThreadsDisabled && !*s.ThreadAutoFollow { + return NewAppError("Config.IsValid", "model.config.is_valid.collapsed_threads.autofollow.app_error", nil, "", http.StatusBadRequest) + } + + if *s.CollapsedThreads != CollapsedThreadsDisabled && + *s.CollapsedThreads != CollapsedThreadsDefaultOn && + *s.CollapsedThreads != CollapsedThreadsDefaultOff { + return NewAppError("Config.IsValid", "model.config.is_valid.collapsed_threads.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *ElasticsearchSettings) isValid() *AppError { + if *s.EnableIndexing { + if *s.ConnectionURL == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest) + } + } + + if *s.EnableSearching && !*s.EnableIndexing { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest) + } + + if *s.EnableAutocomplete && !*s.EnableIndexing { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) + } + + if *s.AggregatePostsAfterDays < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *s.PostsAggregatorJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if *s.LiveIndexingBatchSize < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest) + } + + minBatchSize := 1 + if *s.BatchSize < minBatchSize { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest) + } + + if *s.RequestTimeoutSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (bs *BleveSettings) isValid() *AppError { + if *bs.EnableIndexing { + if *bs.IndexDir == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.filename.app_error", nil, "", http.StatusBadRequest) + } + } else { + if *bs.EnableSearching { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_searching.app_error", nil, "", http.StatusBadRequest) + } + if *bs.EnableAutocomplete { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) + } + } + minBatchSize := 1 + if *bs.BatchSize < 
minBatchSize { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest) + } + + return nil +} + +func (s *DataRetentionSettings) isValid() *AppError { + if *s.MessageRetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + if *s.FileRetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *s.DeletionJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + return nil +} + +func (s *LocalizationSettings) isValid() *AppError { + if *s.AvailableLocales != "" { + if !strings.Contains(*s.AvailableLocales, *s.DefaultClientLocale) { + return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (s *MessageExportSettings) isValid() *AppError { + if s.EnableExport == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest) + } + if *s.EnableExport { + if s.ExportFromTimestamp == nil || *s.ExportFromTimestamp < 0 || *s.ExportFromTimestamp > GetMillis() { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, "", http.StatusBadRequest) + } else if s.DailyRunTime == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest) + } else if _, err := time.Parse("15:04", *s.DailyRunTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest) + } else if s.BatchSize == nil || *s.BatchSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest) + } else if s.ExportFormat == nil || (*s.ExportFormat != ComplianceExportTypeActiance && *s.ExportFormat != ComplianceExportTypeGlobalrelay && *s.ExportFormat != ComplianceExportTypeCsv) { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_type.app_error", nil, "", http.StatusBadRequest) + } + + if *s.ExportFormat == ComplianceExportTypeGlobalrelay { + if s.GlobalRelaySettings == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.config_missing.app_error", nil, "", http.StatusBadRequest) + } else if s.GlobalRelaySettings.CustomerType == nil || (*s.GlobalRelaySettings.CustomerType != GlobalrelayCustomerTypeA9 && *s.GlobalRelaySettings.CustomerType != GlobalrelayCustomerTypeA10) { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.customer_type.app_error", nil, "", http.StatusBadRequest) + } else if s.GlobalRelaySettings.EmailAddress == nil || !strings.Contains(*s.GlobalRelaySettings.EmailAddress, "@") { + // validating email addresses is hard - just make sure it contains an '@' sign + // see https://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address + return NewAppError("Config.IsValid", 
"model.config.is_valid.message_export.global_relay.email_address.app_error", nil, "", http.StatusBadRequest) + } else if s.GlobalRelaySettings.SMTPUsername == nil || *s.GlobalRelaySettings.SMTPUsername == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_username.app_error", nil, "", http.StatusBadRequest) + } else if s.GlobalRelaySettings.SMTPPassword == nil || *s.GlobalRelaySettings.SMTPPassword == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_password.app_error", nil, "", http.StatusBadRequest) + } + } + } + return nil +} + +func (s *DisplaySettings) isValid() *AppError { + if len(s.CustomURLSchemes) != 0 { + validProtocolPattern := regexp.MustCompile(`(?i)^\s*[A-Za-z][A-Za-z0-9.+-]*\s*$`) + + for _, scheme := range s.CustomURLSchemes { + if !validProtocolPattern.MatchString(scheme) { + return NewAppError( + "Config.IsValid", + "model.config.is_valid.display.custom_url_schemes.app_error", + map[string]interface{}{"Scheme": scheme}, + "", + http.StatusBadRequest, + ) + } + } + } + + return nil +} + +func (s *ImageProxySettings) isValid() *AppError { + if *s.Enable { + switch *s.ImageProxyType { + case ImageProxyTypeLocal: + // No other settings to validate + case ImageProxyTypeAtmosCamo: + if *s.RemoteImageProxyURL == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_url.app_error", nil, "", http.StatusBadRequest) + } + + if *s.RemoteImageProxyOptions == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_options.app_error", nil, "", http.StatusBadRequest) + } + default: + return NewAppError("Config.IsValid", "model.config.is_valid.image_proxy_type.app_error", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (o *Config) GetSanitizeOptions() map[string]bool { + options := map[string]bool{} + options["fullname"] = *o.PrivacySettings.ShowFullName + options["email"] = *o.PrivacySettings.ShowEmailAddress + + return options +} + +func (o *Config) Sanitize() { + if o.LdapSettings.BindPassword != nil && *o.LdapSettings.BindPassword != "" { + *o.LdapSettings.BindPassword = FakeSetting + } + + if o.FileSettings.PublicLinkSalt != nil { + *o.FileSettings.PublicLinkSalt = FakeSetting + } + + if o.FileSettings.AmazonS3SecretAccessKey != nil && *o.FileSettings.AmazonS3SecretAccessKey != "" { + *o.FileSettings.AmazonS3SecretAccessKey = FakeSetting + } + + if o.EmailSettings.SMTPPassword != nil && *o.EmailSettings.SMTPPassword != "" { + *o.EmailSettings.SMTPPassword = FakeSetting + } + + if o.GitLabSettings.Secret != nil && *o.GitLabSettings.Secret != "" { + *o.GitLabSettings.Secret = FakeSetting + } + + if o.GoogleSettings.Secret != nil && *o.GoogleSettings.Secret != "" { + *o.GoogleSettings.Secret = FakeSetting + } + + if o.Office365Settings.Secret != nil && *o.Office365Settings.Secret != "" { + *o.Office365Settings.Secret = FakeSetting + } + + if o.OpenIdSettings.Secret != nil && *o.OpenIdSettings.Secret != "" { + *o.OpenIdSettings.Secret = FakeSetting + } + + if o.SqlSettings.DataSource != nil { + *o.SqlSettings.DataSource = FakeSetting + } + + if o.SqlSettings.AtRestEncryptKey != nil { + *o.SqlSettings.AtRestEncryptKey = FakeSetting + } + + if o.ElasticsearchSettings.Password != nil { + *o.ElasticsearchSettings.Password = FakeSetting + } + + for i := range o.SqlSettings.DataSourceReplicas { + o.SqlSettings.DataSourceReplicas[i] = FakeSetting + } + + for i := range o.SqlSettings.DataSourceSearchReplicas { 
+		o.SqlSettings.DataSourceSearchReplicas[i] = FakeSetting
+	}
+
+	if o.MessageExportSettings.GlobalRelaySettings != nil &&
+		o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != nil &&
+		*o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != "" {
+		*o.MessageExportSettings.GlobalRelaySettings.SMTPPassword = FakeSetting
+	}
+
+	if o.ServiceSettings.GfycatAPISecret != nil && *o.ServiceSettings.GfycatAPISecret != "" {
+		*o.ServiceSettings.GfycatAPISecret = FakeSetting
+	}
+
+	if o.ServiceSettings.SplitKey != nil {
+		*o.ServiceSettings.SplitKey = FakeSetting
+	}
+}
+
+// structToMapFilteredByTag converts a struct into a map, removing those fields that have the tag passed
+// as argument
+func structToMapFilteredByTag(t interface{}, typeOfTag, filterTag string) map[string]interface{} {
+	defer func() {
+		if r := recover(); r != nil {
+			mlog.Warn("Panicked in structToMapFilteredByTag. This should never happen.", mlog.Any("recover", r))
+		}
+	}()
+
+	val := reflect.ValueOf(t)
+	elemField := reflect.TypeOf(t)
+
+	if val.Kind() != reflect.Struct {
+		return nil
+	}
+
+	out := map[string]interface{}{}
+
+	for i := 0; i < val.NumField(); i++ {
+		field := val.Field(i)
+
+		structField := elemField.Field(i)
+		tagPermissions := strings.Split(structField.Tag.Get(typeOfTag), ",")
+		if isTagPresent(filterTag, tagPermissions) {
+			continue
+		}
+
+		var value interface{}
+
+		switch field.Kind() {
+		case reflect.Struct:
+			value = structToMapFilteredByTag(field.Interface(), typeOfTag, filterTag)
+		case reflect.Ptr:
+			indirectType := field.Elem()
+			if indirectType.Kind() == reflect.Struct {
+				value = structToMapFilteredByTag(indirectType.Interface(), typeOfTag, filterTag)
+			} else if indirectType.Kind() != reflect.Invalid {
+				value = indirectType.Interface()
+			}
+		default:
+			value = field.Interface()
+		}
+
+		out[val.Type().Field(i).Name] = value
+	}
+
+	return out
+}
+
+func isTagPresent(tag string, tags []string) bool {
+	for _, val := range tags {
+		tagValue := strings.TrimSpace(val)
+		if tagValue != "" && tagValue == tag {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Copied from https://golang.org/src/net/dnsclient.go#L119
+func isDomainName(s string) bool {
+	// See RFC 1035, RFC 3696.
+	// Presentation format has dots before every label except the first, and the
+	// terminal empty label is optional here because we assume fully-qualified
+	// (absolute) input. We must therefore reserve space for the first and last
+	// labels' length octets in wire format, where they are necessary and the
+	// maximum total length is 255.
+	// So our _effective_ maximum is 253, but 254 is not rejected if the last
+	// character is a dot.
+	l := len(s)
+	if l == 0 || l > 254 || l == 254 && s[l-1] != '.' {
+		return false
+	}
+
+	last := byte('.')
+	ok := false // Ok once we've seen a letter.
+	partlen := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		switch {
+		default:
+			return false
+		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':
+			ok = true
+			partlen++
+		case '0' <= c && c <= '9':
+			// fine
+			partlen++
+		case c == '-':
+			// Byte before dash cannot be dot.
+			if last == '.' {
+				return false
+			}
+			partlen++
+		case c == '.':
+			// Byte before dot cannot be dot, dash.
+			if last == '.'
|| last == '-' { + return false + } + if partlen > 63 || partlen == 0 { + return false + } + partlen = 0 + } + last = c + } + if last == '-' || partlen > 63 { + return false + } + + return ok +} + +func isSafeLink(link *string) bool { + if link != nil { + if IsValidHTTPURL(*link) { + return true + } else if strings.HasPrefix(*link, "/") { + return true + } else { + return false + } + } + + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go b/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go new file mode 100644 index 00000000..74cd9240 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go @@ -0,0 +1,148 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/graph-gophers/graphql-go" +) + +const ( + UserPropsKeyCustomStatus = "customStatus" + + CustomStatusTextMaxRunes = 100 + MaxRecentCustomStatuses = 5 + DefaultCustomStatusEmoji = "speech_balloon" +) + +var validCustomStatusDuration = map[string]bool{ + "thirty_minutes": true, + "one_hour": true, + "four_hours": true, + "today": true, + "this_week": true, + "date_and_time": true, +} + +type CustomStatus struct { + Emoji string `json:"emoji"` + Text string `json:"text"` + Duration string `json:"duration"` + ExpiresAt time.Time `json:"expires_at"` +} + +func (cs *CustomStatus) PreSave() { + if cs.Emoji == "" { + cs.Emoji = DefaultCustomStatusEmoji + } + + if cs.Duration == "" && !cs.ExpiresAt.Before(time.Now()) { + cs.Duration = "date_and_time" + } + + runes := []rune(cs.Text) + if len(runes) > CustomStatusTextMaxRunes { + cs.Text = string(runes[:CustomStatusTextMaxRunes]) + } +} + +func (cs *CustomStatus) AreDurationAndExpirationTimeValid() bool { + if cs.Duration == "" && (cs.ExpiresAt.IsZero() || !cs.ExpiresAt.Before(time.Now())) { + return true + } + + if validCustomStatusDuration[cs.Duration] && !cs.ExpiresAt.Before(time.Now()) { + return true + } + + return false +} + +// ExpiresAt_ returns the time in a type that has the marshal/unmarshal methods +// attached to it. +func (cs *CustomStatus) ExpiresAt_() graphql.Time { + return graphql.Time{Time: cs.ExpiresAt} +} + +func RuneToHexadecimalString(r rune) string { + return fmt.Sprintf("%04x", r) +} + +type RecentCustomStatuses []CustomStatus + +func (rcs RecentCustomStatuses) Contains(cs *CustomStatus) (bool, error) { + if cs == nil { + return false, nil + } + + csJSON, jsonErr := json.Marshal(cs) + if jsonErr != nil { + return false, jsonErr + } + + // status is empty + if len(csJSON) == 0 || (cs.Emoji == "" && cs.Text == "") { + return false, nil + } + + for _, status := range rcs { + js, jsonErr := json.Marshal(status) + if jsonErr != nil { + return false, jsonErr + } + if bytes.Equal(js, csJSON) { + return true, nil + } + } + + return false, nil +} + +func (rcs RecentCustomStatuses) Add(cs *CustomStatus) RecentCustomStatuses { + newRCS := rcs[:0] + + // if same `text` exists in existing recent custom statuses, modify existing status + for _, status := range rcs { + if status.Text != cs.Text { + newRCS = append(newRCS, status) + } + } + newRCS = append(RecentCustomStatuses{*cs}, newRCS...) 
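+	// Keep only the most recent MaxRecentCustomStatuses entries; the new (or
+	// refreshed) status was just prepended above, so when the cap below is hit
+	// it is the oldest entry that falls off the end.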
+ if len(newRCS) > MaxRecentCustomStatuses { + newRCS = newRCS[:MaxRecentCustomStatuses] + } + return newRCS +} + +func (rcs RecentCustomStatuses) Remove(cs *CustomStatus) (RecentCustomStatuses, error) { + if cs == nil { + return rcs, nil + } + + csJSON, jsonErr := json.Marshal(cs) + if jsonErr != nil { + return rcs, jsonErr + } + + if len(csJSON) == 0 || (cs.Emoji == "" && cs.Text == "") { + return rcs, nil + } + + newRCS := rcs[:0] + for _, status := range rcs { + js, jsonErr := json.Marshal(status) + if jsonErr != nil { + return rcs, jsonErr + } + if !bytes.Equal(js, csJSON) { + newRCS = append(newRCS, status) + } + } + + return newRCS, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/data_retention_policy.go b/vendor/github.com/mattermost/mattermost-server/v6/model/data_retention_policy.go new file mode 100644 index 00000000..549b9801 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/data_retention_policy.go @@ -0,0 +1,72 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type GlobalRetentionPolicy struct { + MessageDeletionEnabled bool `json:"message_deletion_enabled"` + FileDeletionEnabled bool `json:"file_deletion_enabled"` + BoardsDeletionEnabled bool `json:"boards_deletion_enabled"` + MessageRetentionCutoff int64 `json:"message_retention_cutoff"` + FileRetentionCutoff int64 `json:"file_retention_cutoff"` + BoardsRetentionCutoff int64 `json:"boards_retention_cutoff"` +} + +type RetentionPolicy struct { + ID string `db:"Id" json:"id"` + DisplayName string `json:"display_name"` + PostDurationDays *int64 `db:"PostDuration" json:"post_duration"` +} + +type RetentionPolicyWithTeamAndChannelIDs struct { + RetentionPolicy + TeamIDs []string `json:"team_ids"` + ChannelIDs []string `json:"channel_ids"` +} + +type RetentionPolicyWithTeamAndChannelCounts struct { + RetentionPolicy + ChannelCount int64 `json:"channel_count"` + TeamCount int64 `json:"team_count"` +} + +type RetentionPolicyChannel struct { + PolicyID string `db:"PolicyId"` + ChannelID string `db:"ChannelId"` +} + +type RetentionPolicyTeam struct { + PolicyID string `db:"PolicyId"` + TeamID string `db:"TeamId"` +} + +type RetentionPolicyWithTeamAndChannelCountsList struct { + Policies []*RetentionPolicyWithTeamAndChannelCounts `json:"policies"` + TotalCount int64 `json:"total_count"` +} + +type RetentionPolicyForTeam struct { + TeamID string `db:"Id" json:"team_id"` + PostDurationDays int64 `db:"PostDuration" json:"post_duration"` +} + +type RetentionPolicyForTeamList struct { + Policies []*RetentionPolicyForTeam `json:"policies"` + TotalCount int64 `json:"total_count"` +} + +type RetentionPolicyForChannel struct { + ChannelID string `db:"Id" json:"channel_id"` + PostDurationDays int64 `db:"PostDuration" json:"post_duration"` +} + +type RetentionPolicyForChannelList struct { + Policies []*RetentionPolicyForChannel `json:"policies"` + TotalCount int64 `json:"total_count"` +} + +type RetentionPolicyCursor struct { + ChannelPoliciesDone bool + TeamPoliciesDone bool + GlobalPoliciesDone bool +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go similarity index 63% rename from vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go index aeee9b38..4b30ee43 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go @@ -4,18 +4,17 @@ package model import ( - "encoding/json" - "io" "net/http" "regexp" + "sort" ) const ( - EMOJI_NAME_MAX_LENGTH = 64 - EMOJI_SORT_BY_NAME = "name" + EmojiNameMaxLength = 64 + EmojiSortByName = "name" ) -var EMOJI_PATTERN = regexp.MustCompile(`:[a-zA-Z0-9_-]+:`) +var EmojiPattern = regexp.MustCompile(`:[a-zA-Z0-9_+-]+:`) type Emoji struct { Id string `json:"id"` @@ -36,6 +35,28 @@ func GetSystemEmojiId(emojiName string) (string, bool) { return id, found } +func makeReverseEmojiMap() map[string][]string { + reverseEmojiMap := make(map[string][]string) + for key, value := range SystemEmojis { + emojiNames := reverseEmojiMap[value] + emojiNames = append(emojiNames, key) + sort.Strings(emojiNames) + reverseEmojiMap[value] = emojiNames + } + + return reverseEmojiMap +} + +var reverseSystemEmojisMap = makeReverseEmojiMap() + +func GetEmojiNameFromUnicode(unicode string) (emojiName string, count int) { + if emojiNames, found := reverseSystemEmojisMap[unicode]; found { + return emojiNames[0], len(emojiNames) + } + + return "", 0 +} + func (emoji *Emoji) IsValid() *AppError { if !IsValidId(emoji.Id) { return NewAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "", http.StatusBadRequest) @@ -57,9 +78,12 @@ func (emoji *Emoji) IsValid() *AppError { } func IsValidEmojiName(name string) *AppError { - if len(name) == 0 || len(name) > EMOJI_NAME_MAX_LENGTH || !IsValidAlphaNumHyphenUnderscore(name, false) || inSystemEmoji(name) { + if name == "" || len(name) > EmojiNameMaxLength || !IsValidAlphaNumHyphenUnderscorePlus(name) { return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest) } + if inSystemEmoji(name) { + return NewAppError("Emoji.IsValid", "model.emoji.system_emoji_name.app_error", nil, "", http.StatusBadRequest) + } return nil } @@ -72,25 +96,3 @@ func (emoji *Emoji) PreSave() { emoji.CreateAt = GetMillis() emoji.UpdateAt = emoji.CreateAt } - -func (emoji *Emoji) ToJson() string { - b, _ := json.Marshal(emoji) - return string(b) -} - -func EmojiFromJson(data io.Reader) *Emoji { - var emoji *Emoji - json.NewDecoder(data).Decode(&emoji) - return emoji -} - -func EmojiListToJson(emojiList []*Emoji) string { - b, _ := json.Marshal(emojiList) - return string(b) -} - -func EmojiListFromJson(data io.Reader) []*Emoji { - var emojiList []*Emoji - json.NewDecoder(data).Decode(&emojiList) - return emojiList -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_data.go b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_data.go new file mode 100644 index 00000000..849c2046 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_data.go @@ -0,0 +1,7 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. +// This file is automatically generated via `make emojis`. Do not modify it manually. 
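+// SystemEmojis below maps emoji short names to unicode code-point strings, and
+// several names may share one code point. A minimal, illustrative sketch of the
+// reverse lookup that emoji.go builds on top of this map:
+//
+//	name, count := GetEmojiNameFromUnicode("1f606")
+//	// name == "laughing" (first alias in sorted order), count == 2,
+//	// because both "laughing" and "satisfied" map to "1f606".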
+ +package model + +var SystemEmojis = map[string]string{"grinning": "1f600", "smiley": "1f603", "smile": "1f604", "grin": "1f601", "laughing": "1f606", "satisfied": "1f606", "sweat_smile": "1f605", "rolling_on_the_floor_laughing": "1f923", "rofl": "1f923", "joy": "1f602", "slightly_smiling_face": "1f642", "upside_down_face": "1f643", "wink": "1f609", "blush": "1f60a", "innocent": "1f607", "smiling_face_with_3_hearts": "1f970", "heart_eyes": "1f60d", "star-struck": "1f929", "grinning_face_with_star_eyes": "1f929", "kissing_heart": "1f618", "kissing": "1f617", "relaxed": "263a-fe0f", "kissing_closed_eyes": "1f61a", "kissing_smiling_eyes": "1f619", "smiling_face_with_tear": "1f972", "yum": "1f60b", "stuck_out_tongue": "1f61b", "stuck_out_tongue_winking_eye": "1f61c", "zany_face": "1f92a", "grinning_face_with_one_large_and_one_small_eye": "1f92a", "stuck_out_tongue_closed_eyes": "1f61d", "money_mouth_face": "1f911", "hugging_face": "1f917", "hugs": "1f917", "face_with_hand_over_mouth": "1f92d", "smiling_face_with_smiling_eyes_and_hand_covering_mouth": "1f92d", "shushing_face": "1f92b", "face_with_finger_covering_closed_lips": "1f92b", "thinking_face": "1f914", "thinking": "1f914", "zipper_mouth_face": "1f910", "face_with_raised_eyebrow": "1f928", "face_with_one_eyebrow_raised": "1f928", "neutral_face": "1f610", "expressionless": "1f611", "no_mouth": "1f636", "smirk": "1f60f", "unamused": "1f612", "face_with_rolling_eyes": "1f644", "roll_eyes": "1f644", "grimacing": "1f62c", "lying_face": "1f925", "relieved": "1f60c", "pensive": "1f614", "sleepy": "1f62a", "drooling_face": "1f924", "sleeping": "1f634", "mask": "1f637", "face_with_thermometer": "1f912", "face_with_head_bandage": "1f915", "nauseated_face": "1f922", "face_vomiting": "1f92e", "face_with_open_mouth_vomiting": "1f92e", "sneezing_face": "1f927", "hot_face": "1f975", "cold_face": "1f976", "woozy_face": "1f974", "dizzy_face": "1f635", "exploding_head": "1f92f", "shocked_face_with_exploding_head": "1f92f", "face_with_cowboy_hat": "1f920", "cowboy_hat_face": "1f920", "partying_face": "1f973", "disguised_face": "1f978", "sunglasses": "1f60e", "nerd_face": "1f913", "face_with_monocle": "1f9d0", "confused": "1f615", "worried": "1f61f", "slightly_frowning_face": "1f641", "white_frowning_face": "2639-fe0f", "frowning_face": "2639-fe0f", "open_mouth": "1f62e", "hushed": "1f62f", "astonished": "1f632", "flushed": "1f633", "pleading_face": "1f97a", "frowning": "1f626", "anguished": "1f627", "fearful": "1f628", "cold_sweat": "1f630", "disappointed_relieved": "1f625", "cry": "1f622", "sob": "1f62d", "scream": "1f631", "confounded": "1f616", "persevere": "1f623", "disappointed": "1f61e", "sweat": "1f613", "weary": "1f629", "tired_face": "1f62b", "yawning_face": "1f971", "triumph": "1f624", "rage": "1f621", "pout": "1f621", "angry": "1f620", "face_with_symbols_on_mouth": "1f92c", "serious_face_with_symbols_covering_mouth": "1f92c", "smiling_imp": "1f608", "imp": "1f47f", "skull": "1f480", "skull_and_crossbones": "2620-fe0f", "hankey": "1f4a9", "poop": "1f4a9", "shit": "1f4a9", "clown_face": "1f921", "japanese_ogre": "1f479", "japanese_goblin": "1f47a", "ghost": "1f47b", "alien": "1f47d", "space_invader": "1f47e", "robot_face": "1f916", "robot": "1f916", "smiley_cat": "1f63a", "smile_cat": "1f638", "joy_cat": "1f639", "heart_eyes_cat": "1f63b", "smirk_cat": "1f63c", "kissing_cat": "1f63d", "scream_cat": "1f640", "crying_cat_face": "1f63f", "pouting_cat": "1f63e", "see_no_evil": "1f648", "hear_no_evil": "1f649", "speak_no_evil": "1f64a", "kiss": 
"1f48b", "love_letter": "1f48c", "cupid": "1f498", "gift_heart": "1f49d", "sparkling_heart": "1f496", "heartpulse": "1f497", "heartbeat": "1f493", "revolving_hearts": "1f49e", "two_hearts": "1f495", "heart_decoration": "1f49f", "heavy_heart_exclamation_mark_ornament": "2763-fe0f", "heavy_heart_exclamation": "2763-fe0f", "broken_heart": "1f494", "heart": "2764-fe0f", "orange_heart": "1f9e1", "yellow_heart": "1f49b", "green_heart": "1f49a", "blue_heart": "1f499", "purple_heart": "1f49c", "brown_heart": "1f90e", "black_heart": "1f5a4", "white_heart": "1f90d", "100": "1f4af", "anger": "1f4a2", "boom": "1f4a5", "collision": "1f4a5", "dizzy": "1f4ab", "sweat_drops": "1f4a6", "dash": "1f4a8", "hole": "1f573-fe0f", "bomb": "1f4a3", "speech_balloon": "1f4ac", "eye-in-speech-bubble": "1f441-fe0f-200d-1f5e8-fe0f", "left_speech_bubble": "1f5e8-fe0f", "right_anger_bubble": "1f5ef-fe0f", "thought_balloon": "1f4ad", "zzz": "1f4a4", "wave": "1f44b", "raised_back_of_hand": "1f91a", "raised_hand_with_fingers_splayed": "1f590-fe0f", "hand": "270b", "raised_hand": "270b", "spock-hand": "1f596", "vulcan_salute": "1f596", "ok_hand": "1f44c", "pinched_fingers": "1f90c", "pinching_hand": "1f90f", "v": "270c-fe0f", "crossed_fingers": "1f91e", "hand_with_index_and_middle_fingers_crossed": "1f91e", "i_love_you_hand_sign": "1f91f", "the_horns": "1f918", "sign_of_the_horns": "1f918", "metal": "1f918", "call_me_hand": "1f919", "point_left": "1f448", "point_right": "1f449", "point_up_2": "1f446", "middle_finger": "1f595", "reversed_hand_with_middle_finger_extended": "1f595", "fu": "1f595", "point_down": "1f447", "point_up": "261d-fe0f", "+1": "1f44d", "thumbsup": "1f44d", "-1": "1f44e", "thumbsdown": "1f44e", "fist": "270a", "fist_raised": "270a", "facepunch": "1f44a", "punch": "1f44a", "fist_oncoming": "1f44a", "left-facing_fist": "1f91b", "fist_left": "1f91b", "right-facing_fist": "1f91c", "fist_right": "1f91c", "clap": "1f44f", "raised_hands": "1f64c", "open_hands": "1f450", "palms_up_together": "1f932", "handshake": "1f91d", "pray": "1f64f", "writing_hand": "270d-fe0f", "nail_care": "1f485", "selfie": "1f933", "muscle": "1f4aa", "mechanical_arm": "1f9be", "mechanical_leg": "1f9bf", "leg": "1f9b5", "foot": "1f9b6", "ear": "1f442", "ear_with_hearing_aid": "1f9bb", "nose": "1f443", "brain": "1f9e0", "anatomical_heart": "1fac0", "lungs": "1fac1", "tooth": "1f9b7", "bone": "1f9b4", "eyes": "1f440", "eye": "1f441-fe0f", "tongue": "1f445", "lips": "1f444", "baby": "1f476", "child": "1f9d2", "boy": "1f466", "girl": "1f467", "adult": "1f9d1", "person_with_blond_hair": "1f471", "man": "1f468", "bearded_person": "1f9d4", "red_haired_man": "1f468-200d-1f9b0", "curly_haired_man": "1f468-200d-1f9b1", "white_haired_man": "1f468-200d-1f9b3", "bald_man": "1f468-200d-1f9b2", "woman": "1f469", "red_haired_woman": "1f469-200d-1f9b0", "red_haired_person": "1f9d1-200d-1f9b0", "curly_haired_woman": "1f469-200d-1f9b1", "curly_haired_person": "1f9d1-200d-1f9b1", "white_haired_woman": "1f469-200d-1f9b3", "white_haired_person": "1f9d1-200d-1f9b3", "bald_woman": "1f469-200d-1f9b2", "bald_person": "1f9d1-200d-1f9b2", "blond-haired-woman": "1f471-200d-2640-fe0f", "blonde_woman": "1f471-200d-2640-fe0f", "blond-haired-man": "1f471-200d-2642-fe0f", "blonde_man": "1f471-200d-2642-fe0f", "older_adult": "1f9d3", "older_man": "1f474", "older_woman": "1f475", "person_frowning": "1f64d", "man-frowning": "1f64d-200d-2642-fe0f", "frowning_man": "1f64d-200d-2642-fe0f", "woman-frowning": "1f64d-200d-2640-fe0f", "frowning_woman": "1f64d-200d-2640-fe0f", 
"person_with_pouting_face": "1f64e", "man-pouting": "1f64e-200d-2642-fe0f", "pouting_man": "1f64e-200d-2642-fe0f", "woman-pouting": "1f64e-200d-2640-fe0f", "pouting_woman": "1f64e-200d-2640-fe0f", "no_good": "1f645", "man-gesturing-no": "1f645-200d-2642-fe0f", "ng_man": "1f645-200d-2642-fe0f", "no_good_man": "1f645-200d-2642-fe0f", "woman-gesturing-no": "1f645-200d-2640-fe0f", "no_good_woman": "1f645-200d-2640-fe0f", "ng_woman": "1f645-200d-2640-fe0f", "ok_woman": "1f646", "man-gesturing-ok": "1f646-200d-2642-fe0f", "ok_man": "1f646-200d-2642-fe0f", "woman-gesturing-ok": "1f646-200d-2640-fe0f", "information_desk_person": "1f481", "man-tipping-hand": "1f481-200d-2642-fe0f", "tipping_hand_man": "1f481-200d-2642-fe0f", "woman-tipping-hand": "1f481-200d-2640-fe0f", "tipping_hand_woman": "1f481-200d-2640-fe0f", "raising_hand": "1f64b", "man-raising-hand": "1f64b-200d-2642-fe0f", "raising_hand_man": "1f64b-200d-2642-fe0f", "woman-raising-hand": "1f64b-200d-2640-fe0f", "raising_hand_woman": "1f64b-200d-2640-fe0f", "deaf_person": "1f9cf", "deaf_man": "1f9cf-200d-2642-fe0f", "deaf_woman": "1f9cf-200d-2640-fe0f", "bow": "1f647", "man-bowing": "1f647-200d-2642-fe0f", "bowing_man": "1f647-200d-2642-fe0f", "woman-bowing": "1f647-200d-2640-fe0f", "bowing_woman": "1f647-200d-2640-fe0f", "face_palm": "1f926", "man-facepalming": "1f926-200d-2642-fe0f", "man_facepalming": "1f926-200d-2642-fe0f", "woman-facepalming": "1f926-200d-2640-fe0f", "woman_facepalming": "1f926-200d-2640-fe0f", "shrug": "1f937", "man-shrugging": "1f937-200d-2642-fe0f", "man_shrugging": "1f937-200d-2642-fe0f", "woman-shrugging": "1f937-200d-2640-fe0f", "woman_shrugging": "1f937-200d-2640-fe0f", "health_worker": "1f9d1-200d-2695-fe0f", "male-doctor": "1f468-200d-2695-fe0f", "man_health_worker": "1f468-200d-2695-fe0f", "female-doctor": "1f469-200d-2695-fe0f", "woman_health_worker": "1f469-200d-2695-fe0f", "student": "1f9d1-200d-1f393", "male-student": "1f468-200d-1f393", "man_student": "1f468-200d-1f393", "female-student": "1f469-200d-1f393", "woman_student": "1f469-200d-1f393", "teacher": "1f9d1-200d-1f3eb", "male-teacher": "1f468-200d-1f3eb", "man_teacher": "1f468-200d-1f3eb", "female-teacher": "1f469-200d-1f3eb", "woman_teacher": "1f469-200d-1f3eb", "judge": "1f9d1-200d-2696-fe0f", "male-judge": "1f468-200d-2696-fe0f", "man_judge": "1f468-200d-2696-fe0f", "female-judge": "1f469-200d-2696-fe0f", "woman_judge": "1f469-200d-2696-fe0f", "farmer": "1f9d1-200d-1f33e", "male-farmer": "1f468-200d-1f33e", "man_farmer": "1f468-200d-1f33e", "female-farmer": "1f469-200d-1f33e", "woman_farmer": "1f469-200d-1f33e", "cook": "1f9d1-200d-1f373", "male-cook": "1f468-200d-1f373", "man_cook": "1f468-200d-1f373", "female-cook": "1f469-200d-1f373", "woman_cook": "1f469-200d-1f373", "mechanic": "1f9d1-200d-1f527", "male-mechanic": "1f468-200d-1f527", "man_mechanic": "1f468-200d-1f527", "female-mechanic": "1f469-200d-1f527", "woman_mechanic": "1f469-200d-1f527", "factory_worker": "1f9d1-200d-1f3ed", "male-factory-worker": "1f468-200d-1f3ed", "man_factory_worker": "1f468-200d-1f3ed", "female-factory-worker": "1f469-200d-1f3ed", "woman_factory_worker": "1f469-200d-1f3ed", "office_worker": "1f9d1-200d-1f4bc", "male-office-worker": "1f468-200d-1f4bc", "man_office_worker": "1f468-200d-1f4bc", "female-office-worker": "1f469-200d-1f4bc", "woman_office_worker": "1f469-200d-1f4bc", "scientist": "1f9d1-200d-1f52c", "male-scientist": "1f468-200d-1f52c", "man_scientist": "1f468-200d-1f52c", "female-scientist": "1f469-200d-1f52c", "woman_scientist": "1f469-200d-1f52c", 
"technologist": "1f9d1-200d-1f4bb", "male-technologist": "1f468-200d-1f4bb", "man_technologist": "1f468-200d-1f4bb", "female-technologist": "1f469-200d-1f4bb", "woman_technologist": "1f469-200d-1f4bb", "singer": "1f9d1-200d-1f3a4", "male-singer": "1f468-200d-1f3a4", "man_singer": "1f468-200d-1f3a4", "female-singer": "1f469-200d-1f3a4", "woman_singer": "1f469-200d-1f3a4", "artist": "1f9d1-200d-1f3a8", "male-artist": "1f468-200d-1f3a8", "man_artist": "1f468-200d-1f3a8", "female-artist": "1f469-200d-1f3a8", "woman_artist": "1f469-200d-1f3a8", "pilot": "1f9d1-200d-2708-fe0f", "male-pilot": "1f468-200d-2708-fe0f", "man_pilot": "1f468-200d-2708-fe0f", "female-pilot": "1f469-200d-2708-fe0f", "woman_pilot": "1f469-200d-2708-fe0f", "astronaut": "1f9d1-200d-1f680", "male-astronaut": "1f468-200d-1f680", "man_astronaut": "1f468-200d-1f680", "female-astronaut": "1f469-200d-1f680", "woman_astronaut": "1f469-200d-1f680", "firefighter": "1f9d1-200d-1f692", "male-firefighter": "1f468-200d-1f692", "man_firefighter": "1f468-200d-1f692", "female-firefighter": "1f469-200d-1f692", "woman_firefighter": "1f469-200d-1f692", "cop": "1f46e", "male-police-officer": "1f46e-200d-2642-fe0f", "policeman": "1f46e-200d-2642-fe0f", "female-police-officer": "1f46e-200d-2640-fe0f", "policewoman": "1f46e-200d-2640-fe0f", "sleuth_or_spy": "1f575-fe0f", "detective": "1f575-fe0f", "male-detective": "1f575-fe0f-200d-2642-fe0f", "male_detective": "1f575-fe0f-200d-2642-fe0f", "female-detective": "1f575-fe0f-200d-2640-fe0f", "female_detective": "1f575-fe0f-200d-2640-fe0f", "guardsman": "1f482", "male-guard": "1f482-200d-2642-fe0f", "female-guard": "1f482-200d-2640-fe0f", "guardswoman": "1f482-200d-2640-fe0f", "ninja": "1f977", "construction_worker": "1f477", "male-construction-worker": "1f477-200d-2642-fe0f", "construction_worker_man": "1f477-200d-2642-fe0f", "female-construction-worker": "1f477-200d-2640-fe0f", "construction_worker_woman": "1f477-200d-2640-fe0f", "prince": "1f934", "princess": "1f478", "man_with_turban": "1f473", "man-wearing-turban": "1f473-200d-2642-fe0f", "woman-wearing-turban": "1f473-200d-2640-fe0f", "woman_with_turban": "1f473-200d-2640-fe0f", "man_with_gua_pi_mao": "1f472", "person_with_headscarf": "1f9d5", "person_in_tuxedo": "1f935", "man_in_tuxedo": "1f935-200d-2642-fe0f", "woman_in_tuxedo": "1f935-200d-2640-fe0f", "bride_with_veil": "1f470", "man_with_veil": "1f470-200d-2642-fe0f", "woman_with_veil": "1f470-200d-2640-fe0f", "pregnant_woman": "1f930", "breast-feeding": "1f931", "woman_feeding_baby": "1f469-200d-1f37c", "man_feeding_baby": "1f468-200d-1f37c", "person_feeding_baby": "1f9d1-200d-1f37c", "angel": "1f47c", "santa": "1f385", "mrs_claus": "1f936", "mother_christmas": "1f936", "mx_claus": "1f9d1-200d-1f384", "superhero": "1f9b8", "male_superhero": "1f9b8-200d-2642-fe0f", "female_superhero": "1f9b8-200d-2640-fe0f", "supervillain": "1f9b9", "male_supervillain": "1f9b9-200d-2642-fe0f", "female_supervillain": "1f9b9-200d-2640-fe0f", "mage": "1f9d9", "male_mage": "1f9d9-200d-2642-fe0f", "female_mage": "1f9d9-200d-2640-fe0f", "fairy": "1f9da", "male_fairy": "1f9da-200d-2642-fe0f", "female_fairy": "1f9da-200d-2640-fe0f", "vampire": "1f9db", "male_vampire": "1f9db-200d-2642-fe0f", "female_vampire": "1f9db-200d-2640-fe0f", "merperson": "1f9dc", "merman": "1f9dc-200d-2642-fe0f", "mermaid": "1f9dc-200d-2640-fe0f", "elf": "1f9dd", "male_elf": "1f9dd-200d-2642-fe0f", "female_elf": "1f9dd-200d-2640-fe0f", "genie": "1f9de", "male_genie": "1f9de-200d-2642-fe0f", "female_genie": "1f9de-200d-2640-fe0f", "zombie": 
"1f9df", "male_zombie": "1f9df-200d-2642-fe0f", "female_zombie": "1f9df-200d-2640-fe0f", "massage": "1f486", "man-getting-massage": "1f486-200d-2642-fe0f", "massage_man": "1f486-200d-2642-fe0f", "woman-getting-massage": "1f486-200d-2640-fe0f", "massage_woman": "1f486-200d-2640-fe0f", "haircut": "1f487", "man-getting-haircut": "1f487-200d-2642-fe0f", "haircut_man": "1f487-200d-2642-fe0f", "woman-getting-haircut": "1f487-200d-2640-fe0f", "haircut_woman": "1f487-200d-2640-fe0f", "walking": "1f6b6", "man-walking": "1f6b6-200d-2642-fe0f", "walking_man": "1f6b6-200d-2642-fe0f", "woman-walking": "1f6b6-200d-2640-fe0f", "walking_woman": "1f6b6-200d-2640-fe0f", "standing_person": "1f9cd", "man_standing": "1f9cd-200d-2642-fe0f", "woman_standing": "1f9cd-200d-2640-fe0f", "kneeling_person": "1f9ce", "man_kneeling": "1f9ce-200d-2642-fe0f", "woman_kneeling": "1f9ce-200d-2640-fe0f", "person_with_probing_cane": "1f9d1-200d-1f9af", "man_with_probing_cane": "1f468-200d-1f9af", "woman_with_probing_cane": "1f469-200d-1f9af", "person_in_motorized_wheelchair": "1f9d1-200d-1f9bc", "man_in_motorized_wheelchair": "1f468-200d-1f9bc", "woman_in_motorized_wheelchair": "1f469-200d-1f9bc", "person_in_manual_wheelchair": "1f9d1-200d-1f9bd", "man_in_manual_wheelchair": "1f468-200d-1f9bd", "woman_in_manual_wheelchair": "1f469-200d-1f9bd", "runner": "1f3c3", "running": "1f3c3", "man-running": "1f3c3-200d-2642-fe0f", "running_man": "1f3c3-200d-2642-fe0f", "woman-running": "1f3c3-200d-2640-fe0f", "running_woman": "1f3c3-200d-2640-fe0f", "dancer": "1f483", "man_dancing": "1f57a", "man_in_business_suit_levitating": "1f574-fe0f", "business_suit_levitating": "1f574-fe0f", "dancers": "1f46f", "man-with-bunny-ears-partying": "1f46f-200d-2642-fe0f", "dancing_men": "1f46f-200d-2642-fe0f", "woman-with-bunny-ears-partying": "1f46f-200d-2640-fe0f", "dancing_women": "1f46f-200d-2640-fe0f", "person_in_steamy_room": "1f9d6", "man_in_steamy_room": "1f9d6-200d-2642-fe0f", "woman_in_steamy_room": "1f9d6-200d-2640-fe0f", "person_climbing": "1f9d7", "man_climbing": "1f9d7-200d-2642-fe0f", "woman_climbing": "1f9d7-200d-2640-fe0f", "fencer": "1f93a", "person_fencing": "1f93a", "horse_racing": "1f3c7", "skier": "26f7-fe0f", "snowboarder": "1f3c2", "golfer": "1f3cc-fe0f", "man-golfing": "1f3cc-fe0f-200d-2642-fe0f", "golfing_man": "1f3cc-fe0f-200d-2642-fe0f", "woman-golfing": "1f3cc-fe0f-200d-2640-fe0f", "golfing_woman": "1f3cc-fe0f-200d-2640-fe0f", "surfer": "1f3c4", "man-surfing": "1f3c4-200d-2642-fe0f", "surfing_man": "1f3c4-200d-2642-fe0f", "woman-surfing": "1f3c4-200d-2640-fe0f", "surfing_woman": "1f3c4-200d-2640-fe0f", "rowboat": "1f6a3", "man-rowing-boat": "1f6a3-200d-2642-fe0f", "rowing_man": "1f6a3-200d-2642-fe0f", "woman-rowing-boat": "1f6a3-200d-2640-fe0f", "rowing_woman": "1f6a3-200d-2640-fe0f", "swimmer": "1f3ca", "man-swimming": "1f3ca-200d-2642-fe0f", "swimming_man": "1f3ca-200d-2642-fe0f", "woman-swimming": "1f3ca-200d-2640-fe0f", "swimming_woman": "1f3ca-200d-2640-fe0f", "person_with_ball": "26f9-fe0f", "man-bouncing-ball": "26f9-fe0f-200d-2642-fe0f", "basketball_man": "26f9-fe0f-200d-2642-fe0f", "woman-bouncing-ball": "26f9-fe0f-200d-2640-fe0f", "basketball_woman": "26f9-fe0f-200d-2640-fe0f", "weight_lifter": "1f3cb-fe0f", "man-lifting-weights": "1f3cb-fe0f-200d-2642-fe0f", "weight_lifting_man": "1f3cb-fe0f-200d-2642-fe0f", "woman-lifting-weights": "1f3cb-fe0f-200d-2640-fe0f", "weight_lifting_woman": "1f3cb-fe0f-200d-2640-fe0f", "bicyclist": "1f6b4", "man-biking": "1f6b4-200d-2642-fe0f", "biking_man": "1f6b4-200d-2642-fe0f", 
"woman-biking": "1f6b4-200d-2640-fe0f", "biking_woman": "1f6b4-200d-2640-fe0f", "mountain_bicyclist": "1f6b5", "man-mountain-biking": "1f6b5-200d-2642-fe0f", "mountain_biking_man": "1f6b5-200d-2642-fe0f", "woman-mountain-biking": "1f6b5-200d-2640-fe0f", "mountain_biking_woman": "1f6b5-200d-2640-fe0f", "person_doing_cartwheel": "1f938", "man-cartwheeling": "1f938-200d-2642-fe0f", "man_cartwheeling": "1f938-200d-2642-fe0f", "woman-cartwheeling": "1f938-200d-2640-fe0f", "woman_cartwheeling": "1f938-200d-2640-fe0f", "wrestlers": "1f93c", "man-wrestling": "1f93c-200d-2642-fe0f", "men_wrestling": "1f93c-200d-2642-fe0f", "woman-wrestling": "1f93c-200d-2640-fe0f", "women_wrestling": "1f93c-200d-2640-fe0f", "water_polo": "1f93d", "man-playing-water-polo": "1f93d-200d-2642-fe0f", "man_playing_water_polo": "1f93d-200d-2642-fe0f", "woman-playing-water-polo": "1f93d-200d-2640-fe0f", "woman_playing_water_polo": "1f93d-200d-2640-fe0f", "handball": "1f93e", "man-playing-handball": "1f93e-200d-2642-fe0f", "man_playing_handball": "1f93e-200d-2642-fe0f", "woman-playing-handball": "1f93e-200d-2640-fe0f", "woman_playing_handball": "1f93e-200d-2640-fe0f", "juggling": "1f939", "man-juggling": "1f939-200d-2642-fe0f", "man_juggling": "1f939-200d-2642-fe0f", "woman-juggling": "1f939-200d-2640-fe0f", "woman_juggling": "1f939-200d-2640-fe0f", "person_in_lotus_position": "1f9d8", "man_in_lotus_position": "1f9d8-200d-2642-fe0f", "woman_in_lotus_position": "1f9d8-200d-2640-fe0f", "bath": "1f6c0", "sleeping_accommodation": "1f6cc", "sleeping_bed": "1f6cc", "people_holding_hands": "1f9d1-200d-1f91d-200d-1f9d1", "two_women_holding_hands": "1f46d", "women_holding_hands": "1f46d", "man_and_woman_holding_hands": "1f46b", "woman_and_man_holding_hands": "1f46b", "couple": "1f46b", "two_men_holding_hands": "1f46c", "men_holding_hands": "1f46c", "couplekiss": "1f48f", "woman-kiss-man": "1f469-200d-2764-fe0f-200d-1f48b-200d-1f468", "couplekiss_man_woman": "1f469-200d-2764-fe0f-200d-1f48b-200d-1f468", "man-kiss-man": "1f468-200d-2764-fe0f-200d-1f48b-200d-1f468", "couplekiss_man_man": "1f468-200d-2764-fe0f-200d-1f48b-200d-1f468", "woman-kiss-woman": "1f469-200d-2764-fe0f-200d-1f48b-200d-1f469", "couplekiss_woman_woman": "1f469-200d-2764-fe0f-200d-1f48b-200d-1f469", "couple_with_heart": "1f491", "woman-heart-man": "1f469-200d-2764-fe0f-200d-1f468", "couple_with_heart_woman_man": "1f469-200d-2764-fe0f-200d-1f468", "man-heart-man": "1f468-200d-2764-fe0f-200d-1f468", "couple_with_heart_man_man": "1f468-200d-2764-fe0f-200d-1f468", "woman-heart-woman": "1f469-200d-2764-fe0f-200d-1f469", "couple_with_heart_woman_woman": "1f469-200d-2764-fe0f-200d-1f469", "family": "1f46a", "man-woman-boy": "1f468-200d-1f469-200d-1f466", "family_man_woman_boy": "1f468-200d-1f469-200d-1f466", "man-woman-girl": "1f468-200d-1f469-200d-1f467", "family_man_woman_girl": "1f468-200d-1f469-200d-1f467", "man-woman-girl-boy": "1f468-200d-1f469-200d-1f467-200d-1f466", "family_man_woman_girl_boy": "1f468-200d-1f469-200d-1f467-200d-1f466", "man-woman-boy-boy": "1f468-200d-1f469-200d-1f466-200d-1f466", "family_man_woman_boy_boy": "1f468-200d-1f469-200d-1f466-200d-1f466", "man-woman-girl-girl": "1f468-200d-1f469-200d-1f467-200d-1f467", "family_man_woman_girl_girl": "1f468-200d-1f469-200d-1f467-200d-1f467", "man-man-boy": "1f468-200d-1f468-200d-1f466", "family_man_man_boy": "1f468-200d-1f468-200d-1f466", "man-man-girl": "1f468-200d-1f468-200d-1f467", "family_man_man_girl": "1f468-200d-1f468-200d-1f467", "man-man-girl-boy": "1f468-200d-1f468-200d-1f467-200d-1f466", 
"family_man_man_girl_boy": "1f468-200d-1f468-200d-1f467-200d-1f466", "man-man-boy-boy": "1f468-200d-1f468-200d-1f466-200d-1f466", "family_man_man_boy_boy": "1f468-200d-1f468-200d-1f466-200d-1f466", "man-man-girl-girl": "1f468-200d-1f468-200d-1f467-200d-1f467", "family_man_man_girl_girl": "1f468-200d-1f468-200d-1f467-200d-1f467", "woman-woman-boy": "1f469-200d-1f469-200d-1f466", "family_woman_woman_boy": "1f469-200d-1f469-200d-1f466", "woman-woman-girl": "1f469-200d-1f469-200d-1f467", "family_woman_woman_girl": "1f469-200d-1f469-200d-1f467", "woman-woman-girl-boy": "1f469-200d-1f469-200d-1f467-200d-1f466", "family_woman_woman_girl_boy": "1f469-200d-1f469-200d-1f467-200d-1f466", "woman-woman-boy-boy": "1f469-200d-1f469-200d-1f466-200d-1f466", "family_woman_woman_boy_boy": "1f469-200d-1f469-200d-1f466-200d-1f466", "woman-woman-girl-girl": "1f469-200d-1f469-200d-1f467-200d-1f467", "family_woman_woman_girl_girl": "1f469-200d-1f469-200d-1f467-200d-1f467", "man-boy": "1f468-200d-1f466", "family_man_boy": "1f468-200d-1f466", "man-boy-boy": "1f468-200d-1f466-200d-1f466", "family_man_boy_boy": "1f468-200d-1f466-200d-1f466", "man-girl": "1f468-200d-1f467", "family_man_girl": "1f468-200d-1f467", "man-girl-boy": "1f468-200d-1f467-200d-1f466", "family_man_girl_boy": "1f468-200d-1f467-200d-1f466", "man-girl-girl": "1f468-200d-1f467-200d-1f467", "family_man_girl_girl": "1f468-200d-1f467-200d-1f467", "woman-boy": "1f469-200d-1f466", "family_woman_boy": "1f469-200d-1f466", "woman-boy-boy": "1f469-200d-1f466-200d-1f466", "family_woman_boy_boy": "1f469-200d-1f466-200d-1f466", "woman-girl": "1f469-200d-1f467", "family_woman_girl": "1f469-200d-1f467", "woman-girl-boy": "1f469-200d-1f467-200d-1f466", "family_woman_girl_boy": "1f469-200d-1f467-200d-1f466", "woman-girl-girl": "1f469-200d-1f467-200d-1f467", "family_woman_girl_girl": "1f469-200d-1f467-200d-1f467", "speaking_head_in_silhouette": "1f5e3-fe0f", "speaking_head": "1f5e3-fe0f", "bust_in_silhouette": "1f464", "busts_in_silhouette": "1f465", "people_hugging": "1fac2", "footprints": "1f463", "skin-tone-2": "1f3fb", "skin-tone-3": "1f3fc", "skin-tone-4": "1f3fd", "skin-tone-5": "1f3fe", "skin-tone-6": "1f3ff", "monkey_face": "1f435", "monkey": "1f412", "gorilla": "1f98d", "orangutan": "1f9a7", "dog": "1f436", "dog2": "1f415", "guide_dog": "1f9ae", "service_dog": "1f415-200d-1f9ba", "poodle": "1f429", "wolf": "1f43a", "fox_face": "1f98a", "raccoon": "1f99d", "cat": "1f431", "cat2": "1f408", "black_cat": "1f408-200d-2b1b", "lion_face": "1f981", "lion": "1f981", "tiger": "1f42f", "tiger2": "1f405", "leopard": "1f406", "horse": "1f434", "racehorse": "1f40e", "unicorn_face": "1f984", "unicorn": "1f984", "zebra_face": "1f993", "deer": "1f98c", "bison": "1f9ac", "cow": "1f42e", "ox": "1f402", "water_buffalo": "1f403", "cow2": "1f404", "pig": "1f437", "pig2": "1f416", "boar": "1f417", "pig_nose": "1f43d", "ram": "1f40f", "sheep": "1f411", "goat": "1f410", "dromedary_camel": "1f42a", "camel": "1f42b", "llama": "1f999", "giraffe_face": "1f992", "elephant": "1f418", "mammoth": "1f9a3", "rhinoceros": "1f98f", "hippopotamus": "1f99b", "mouse": "1f42d", "mouse2": "1f401", "rat": "1f400", "hamster": "1f439", "rabbit": "1f430", "rabbit2": "1f407", "chipmunk": "1f43f-fe0f", "beaver": "1f9ab", "hedgehog": "1f994", "bat": "1f987", "bear": "1f43b", "polar_bear": "1f43b-200d-2744-fe0f", "koala": "1f428", "panda_face": "1f43c", "sloth": "1f9a5", "otter": "1f9a6", "skunk": "1f9a8", "kangaroo": "1f998", "badger": "1f9a1", "feet": "1f43e", "paw_prints": "1f43e", "turkey": "1f983", 
"chicken": "1f414", "rooster": "1f413", "hatching_chick": "1f423", "baby_chick": "1f424", "hatched_chick": "1f425", "bird": "1f426", "penguin": "1f427", "dove_of_peace": "1f54a-fe0f", "dove": "1f54a-fe0f", "eagle": "1f985", "duck": "1f986", "swan": "1f9a2", "owl": "1f989", "dodo": "1f9a4", "feather": "1fab6", "flamingo": "1f9a9", "peacock": "1f99a", "parrot": "1f99c", "frog": "1f438", "crocodile": "1f40a", "turtle": "1f422", "lizard": "1f98e", "snake": "1f40d", "dragon_face": "1f432", "dragon": "1f409", "sauropod": "1f995", "t-rex": "1f996", "whale": "1f433", "whale2": "1f40b", "dolphin": "1f42c", "flipper": "1f42c", "seal": "1f9ad", "fish": "1f41f", "tropical_fish": "1f420", "blowfish": "1f421", "shark": "1f988", "octopus": "1f419", "shell": "1f41a", "snail": "1f40c", "butterfly": "1f98b", "bug": "1f41b", "ant": "1f41c", "bee": "1f41d", "honeybee": "1f41d", "beetle": "1fab2", "ladybug": "1f41e", "lady_beetle": "1f41e", "cricket": "1f997", "cockroach": "1fab3", "spider": "1f577-fe0f", "spider_web": "1f578-fe0f", "scorpion": "1f982", "mosquito": "1f99f", "fly": "1fab0", "worm": "1fab1", "microbe": "1f9a0", "bouquet": "1f490", "cherry_blossom": "1f338", "white_flower": "1f4ae", "rosette": "1f3f5-fe0f", "rose": "1f339", "wilted_flower": "1f940", "hibiscus": "1f33a", "sunflower": "1f33b", "blossom": "1f33c", "tulip": "1f337", "seedling": "1f331", "potted_plant": "1fab4", "evergreen_tree": "1f332", "deciduous_tree": "1f333", "palm_tree": "1f334", "cactus": "1f335", "ear_of_rice": "1f33e", "herb": "1f33f", "shamrock": "2618-fe0f", "four_leaf_clover": "1f340", "maple_leaf": "1f341", "fallen_leaf": "1f342", "leaves": "1f343", "grapes": "1f347", "melon": "1f348", "watermelon": "1f349", "tangerine": "1f34a", "mandarin": "1f34a", "orange": "1f34a", "lemon": "1f34b", "banana": "1f34c", "pineapple": "1f34d", "mango": "1f96d", "apple": "1f34e", "green_apple": "1f34f", "pear": "1f350", "peach": "1f351", "cherries": "1f352", "strawberry": "1f353", "blueberries": "1fad0", "kiwifruit": "1f95d", "kiwi_fruit": "1f95d", "tomato": "1f345", "olive": "1fad2", "coconut": "1f965", "avocado": "1f951", "eggplant": "1f346", "potato": "1f954", "carrot": "1f955", "corn": "1f33d", "hot_pepper": "1f336-fe0f", "bell_pepper": "1fad1", "cucumber": "1f952", "leafy_green": "1f96c", "broccoli": "1f966", "garlic": "1f9c4", "onion": "1f9c5", "mushroom": "1f344", "peanuts": "1f95c", "chestnut": "1f330", "bread": "1f35e", "croissant": "1f950", "baguette_bread": "1f956", "flatbread": "1fad3", "pretzel": "1f968", "bagel": "1f96f", "pancakes": "1f95e", "waffle": "1f9c7", "cheese_wedge": "1f9c0", "cheese": "1f9c0", "meat_on_bone": "1f356", "poultry_leg": "1f357", "cut_of_meat": "1f969", "bacon": "1f953", "hamburger": "1f354", "fries": "1f35f", "pizza": "1f355", "hotdog": "1f32d", "sandwich": "1f96a", "taco": "1f32e", "burrito": "1f32f", "tamale": "1fad4", "stuffed_flatbread": "1f959", "falafel": "1f9c6", "egg": "1f95a", "fried_egg": "1f373", "cooking": "1f373", "shallow_pan_of_food": "1f958", "stew": "1f372", "fondue": "1fad5", "bowl_with_spoon": "1f963", "green_salad": "1f957", "popcorn": "1f37f", "butter": "1f9c8", "salt": "1f9c2", "canned_food": "1f96b", "bento": "1f371", "rice_cracker": "1f358", "rice_ball": "1f359", "rice": "1f35a", "curry": "1f35b", "ramen": "1f35c", "spaghetti": "1f35d", "sweet_potato": "1f360", "oden": "1f362", "sushi": "1f363", "fried_shrimp": "1f364", "fish_cake": "1f365", "moon_cake": "1f96e", "dango": "1f361", "dumpling": "1f95f", "fortune_cookie": "1f960", "takeout_box": "1f961", "crab": "1f980", 
"lobster": "1f99e", "shrimp": "1f990", "squid": "1f991", "oyster": "1f9aa", "icecream": "1f366", "shaved_ice": "1f367", "ice_cream": "1f368", "doughnut": "1f369", "cookie": "1f36a", "birthday": "1f382", "cake": "1f370", "cupcake": "1f9c1", "pie": "1f967", "chocolate_bar": "1f36b", "candy": "1f36c", "lollipop": "1f36d", "custard": "1f36e", "honey_pot": "1f36f", "baby_bottle": "1f37c", "glass_of_milk": "1f95b", "milk_glass": "1f95b", "coffee": "2615", "teapot": "1fad6", "tea": "1f375", "sake": "1f376", "champagne": "1f37e", "wine_glass": "1f377", "cocktail": "1f378", "tropical_drink": "1f379", "beer": "1f37a", "beers": "1f37b", "clinking_glasses": "1f942", "tumbler_glass": "1f943", "cup_with_straw": "1f964", "bubble_tea": "1f9cb", "beverage_box": "1f9c3", "mate_drink": "1f9c9", "ice_cube": "1f9ca", "chopsticks": "1f962", "knife_fork_plate": "1f37d-fe0f", "plate_with_cutlery": "1f37d-fe0f", "fork_and_knife": "1f374", "spoon": "1f944", "hocho": "1f52a", "knife": "1f52a", "amphora": "1f3fa", "earth_africa": "1f30d", "earth_americas": "1f30e", "earth_asia": "1f30f", "globe_with_meridians": "1f310", "world_map": "1f5fa-fe0f", "japan": "1f5fe", "compass": "1f9ed", "snow_capped_mountain": "1f3d4-fe0f", "mountain_snow": "1f3d4-fe0f", "mountain": "26f0-fe0f", "volcano": "1f30b", "mount_fuji": "1f5fb", "camping": "1f3d5-fe0f", "beach_with_umbrella": "1f3d6-fe0f", "beach_umbrella": "1f3d6-fe0f", "desert": "1f3dc-fe0f", "desert_island": "1f3dd-fe0f", "national_park": "1f3de-fe0f", "stadium": "1f3df-fe0f", "classical_building": "1f3db-fe0f", "building_construction": "1f3d7-fe0f", "bricks": "1f9f1", "rock": "1faa8", "wood": "1fab5", "hut": "1f6d6", "house_buildings": "1f3d8-fe0f", "houses": "1f3d8-fe0f", "derelict_house_building": "1f3da-fe0f", "derelict_house": "1f3da-fe0f", "house": "1f3e0", "house_with_garden": "1f3e1", "office": "1f3e2", "post_office": "1f3e3", "european_post_office": "1f3e4", "hospital": "1f3e5", "bank": "1f3e6", "hotel": "1f3e8", "love_hotel": "1f3e9", "convenience_store": "1f3ea", "school": "1f3eb", "department_store": "1f3ec", "factory": "1f3ed", "japanese_castle": "1f3ef", "european_castle": "1f3f0", "wedding": "1f492", "tokyo_tower": "1f5fc", "statue_of_liberty": "1f5fd", "church": "26ea", "mosque": "1f54c", "hindu_temple": "1f6d5", "synagogue": "1f54d", "shinto_shrine": "26e9-fe0f", "kaaba": "1f54b", "fountain": "26f2", "tent": "26fa", "foggy": "1f301", "night_with_stars": "1f303", "cityscape": "1f3d9-fe0f", "sunrise_over_mountains": "1f304", "sunrise": "1f305", "city_sunset": "1f306", "city_sunrise": "1f307", "bridge_at_night": "1f309", "hotsprings": "2668-fe0f", "carousel_horse": "1f3a0", "ferris_wheel": "1f3a1", "roller_coaster": "1f3a2", "barber": "1f488", "circus_tent": "1f3aa", "steam_locomotive": "1f682", "railway_car": "1f683", "bullettrain_side": "1f684", "bullettrain_front": "1f685", "train2": "1f686", "metro": "1f687", "light_rail": "1f688", "station": "1f689", "tram": "1f68a", "monorail": "1f69d", "mountain_railway": "1f69e", "train": "1f68b", "bus": "1f68c", "oncoming_bus": "1f68d", "trolleybus": "1f68e", "minibus": "1f690", "ambulance": "1f691", "fire_engine": "1f692", "police_car": "1f693", "oncoming_police_car": "1f694", "taxi": "1f695", "oncoming_taxi": "1f696", "car": "1f697", "red_car": "1f697", "oncoming_automobile": "1f698", "blue_car": "1f699", "pickup_truck": "1f6fb", "truck": "1f69a", "articulated_lorry": "1f69b", "tractor": "1f69c", "racing_car": "1f3ce-fe0f", "racing_motorcycle": "1f3cd-fe0f", "motorcycle": "1f3cd-fe0f", "motor_scooter": "1f6f5", 
"manual_wheelchair": "1f9bd", "motorized_wheelchair": "1f9bc", "auto_rickshaw": "1f6fa", "bike": "1f6b2", "scooter": "1f6f4", "kick_scooter": "1f6f4", "skateboard": "1f6f9", "roller_skate": "1f6fc", "busstop": "1f68f", "motorway": "1f6e3-fe0f", "railway_track": "1f6e4-fe0f", "oil_drum": "1f6e2-fe0f", "fuelpump": "26fd", "rotating_light": "1f6a8", "traffic_light": "1f6a5", "vertical_traffic_light": "1f6a6", "octagonal_sign": "1f6d1", "stop_sign": "1f6d1", "construction": "1f6a7", "anchor": "2693", "boat": "26f5", "sailboat": "26f5", "canoe": "1f6f6", "speedboat": "1f6a4", "passenger_ship": "1f6f3-fe0f", "ferry": "26f4-fe0f", "motor_boat": "1f6e5-fe0f", "ship": "1f6a2", "airplane": "2708-fe0f", "small_airplane": "1f6e9-fe0f", "airplane_departure": "1f6eb", "flight_departure": "1f6eb", "airplane_arriving": "1f6ec", "flight_arrival": "1f6ec", "parachute": "1fa82", "seat": "1f4ba", "helicopter": "1f681", "suspension_railway": "1f69f", "mountain_cableway": "1f6a0", "aerial_tramway": "1f6a1", "satellite": "1f6f0-fe0f", "artificial_satellite": "1f6f0-fe0f", "rocket": "1f680", "flying_saucer": "1f6f8", "bellhop_bell": "1f6ce-fe0f", "luggage": "1f9f3", "hourglass": "231b", "hourglass_flowing_sand": "23f3", "watch": "231a", "alarm_clock": "23f0", "stopwatch": "23f1-fe0f", "timer_clock": "23f2-fe0f", "mantelpiece_clock": "1f570-fe0f", "clock12": "1f55b", "clock1230": "1f567", "clock1": "1f550", "clock130": "1f55c", "clock2": "1f551", "clock230": "1f55d", "clock3": "1f552", "clock330": "1f55e", "clock4": "1f553", "clock430": "1f55f", "clock5": "1f554", "clock530": "1f560", "clock6": "1f555", "clock630": "1f561", "clock7": "1f556", "clock730": "1f562", "clock8": "1f557", "clock830": "1f563", "clock9": "1f558", "clock930": "1f564", "clock10": "1f559", "clock1030": "1f565", "clock11": "1f55a", "clock1130": "1f566", "new_moon": "1f311", "waxing_crescent_moon": "1f312", "first_quarter_moon": "1f313", "moon": "1f314", "waxing_gibbous_moon": "1f314", "full_moon": "1f315", "waning_gibbous_moon": "1f316", "last_quarter_moon": "1f317", "waning_crescent_moon": "1f318", "crescent_moon": "1f319", "new_moon_with_face": "1f31a", "first_quarter_moon_with_face": "1f31b", "last_quarter_moon_with_face": "1f31c", "thermometer": "1f321-fe0f", "sunny": "2600-fe0f", "full_moon_with_face": "1f31d", "sun_with_face": "1f31e", "ringed_planet": "1fa90", "star": "2b50", "star2": "1f31f", "stars": "1f320", "milky_way": "1f30c", "cloud": "2601-fe0f", "partly_sunny": "26c5", "thunder_cloud_and_rain": "26c8-fe0f", "cloud_with_lightning_and_rain": "26c8-fe0f", "mostly_sunny": "1f324-fe0f", "sun_small_cloud": "1f324-fe0f", "sun_behind_small_cloud": "1f324-fe0f", "barely_sunny": "1f325-fe0f", "sun_behind_cloud": "1f325-fe0f", "sun_behind_large_cloud": "1f325-fe0f", "partly_sunny_rain": "1f326-fe0f", "sun_behind_rain_cloud": "1f326-fe0f", "rain_cloud": "1f327-fe0f", "cloud_with_rain": "1f327-fe0f", "snow_cloud": "1f328-fe0f", "cloud_with_snow": "1f328-fe0f", "lightning": "1f329-fe0f", "lightning_cloud": "1f329-fe0f", "cloud_with_lightning": "1f329-fe0f", "tornado": "1f32a-fe0f", "tornado_cloud": "1f32a-fe0f", "fog": "1f32b-fe0f", "wind_blowing_face": "1f32c-fe0f", "wind_face": "1f32c-fe0f", "cyclone": "1f300", "rainbow": "1f308", "closed_umbrella": "1f302", "umbrella": "2602-fe0f", "open_umbrella": "2602-fe0f", "umbrella_with_rain_drops": "2614", "umbrella_on_ground": "26f1-fe0f", "parasol_on_ground": "26f1-fe0f", "zap": "26a1", "snowflake": "2744-fe0f", "snowman": "2603-fe0f", "snowman_with_snow": "2603-fe0f", "snowman_without_snow": 
"26c4", "comet": "2604-fe0f", "fire": "1f525", "droplet": "1f4a7", "ocean": "1f30a", "jack_o_lantern": "1f383", "christmas_tree": "1f384", "fireworks": "1f386", "sparkler": "1f387", "firecracker": "1f9e8", "sparkles": "2728", "balloon": "1f388", "tada": "1f389", "confetti_ball": "1f38a", "tanabata_tree": "1f38b", "bamboo": "1f38d", "dolls": "1f38e", "flags": "1f38f", "wind_chime": "1f390", "rice_scene": "1f391", "red_envelope": "1f9e7", "ribbon": "1f380", "gift": "1f381", "reminder_ribbon": "1f397-fe0f", "admission_tickets": "1f39f-fe0f", "tickets": "1f39f-fe0f", "ticket": "1f3ab", "medal": "1f396-fe0f", "medal_military": "1f396-fe0f", "trophy": "1f3c6", "sports_medal": "1f3c5", "medal_sports": "1f3c5", "first_place_medal": "1f947", "1st_place_medal": "1f947", "second_place_medal": "1f948", "2nd_place_medal": "1f948", "third_place_medal": "1f949", "3rd_place_medal": "1f949", "soccer": "26bd", "baseball": "26be", "softball": "1f94e", "basketball": "1f3c0", "volleyball": "1f3d0", "football": "1f3c8", "rugby_football": "1f3c9", "tennis": "1f3be", "flying_disc": "1f94f", "bowling": "1f3b3", "cricket_bat_and_ball": "1f3cf", "field_hockey_stick_and_ball": "1f3d1", "field_hockey": "1f3d1", "ice_hockey_stick_and_puck": "1f3d2", "ice_hockey": "1f3d2", "lacrosse": "1f94d", "table_tennis_paddle_and_ball": "1f3d3", "ping_pong": "1f3d3", "badminton_racquet_and_shuttlecock": "1f3f8", "badminton": "1f3f8", "boxing_glove": "1f94a", "martial_arts_uniform": "1f94b", "goal_net": "1f945", "golf": "26f3", "ice_skate": "26f8-fe0f", "fishing_pole_and_fish": "1f3a3", "diving_mask": "1f93f", "running_shirt_with_sash": "1f3bd", "ski": "1f3bf", "sled": "1f6f7", "curling_stone": "1f94c", "dart": "1f3af", "yo-yo": "1fa80", "kite": "1fa81", "8ball": "1f3b1", "crystal_ball": "1f52e", "magic_wand": "1fa84", "nazar_amulet": "1f9ff", "video_game": "1f3ae", "joystick": "1f579-fe0f", "slot_machine": "1f3b0", "game_die": "1f3b2", "jigsaw": "1f9e9", "teddy_bear": "1f9f8", "pinata": "1fa85", "nesting_dolls": "1fa86", "spades": "2660-fe0f", "hearts": "2665-fe0f", "diamonds": "2666-fe0f", "clubs": "2663-fe0f", "chess_pawn": "265f-fe0f", "black_joker": "1f0cf", "mahjong": "1f004", "flower_playing_cards": "1f3b4", "performing_arts": "1f3ad", "frame_with_picture": "1f5bc-fe0f", "framed_picture": "1f5bc-fe0f", "art": "1f3a8", "thread": "1f9f5", "sewing_needle": "1faa1", "yarn": "1f9f6", "knot": "1faa2", "eyeglasses": "1f453", "dark_sunglasses": "1f576-fe0f", "goggles": "1f97d", "lab_coat": "1f97c", "safety_vest": "1f9ba", "necktie": "1f454", "shirt": "1f455", "tshirt": "1f455", "jeans": "1f456", "scarf": "1f9e3", "gloves": "1f9e4", "coat": "1f9e5", "socks": "1f9e6", "dress": "1f457", "kimono": "1f458", "sari": "1f97b", "one-piece_swimsuit": "1fa71", "briefs": "1fa72", "shorts": "1fa73", "bikini": "1f459", "womans_clothes": "1f45a", "purse": "1f45b", "handbag": "1f45c", "pouch": "1f45d", "shopping_bags": "1f6cd-fe0f", "shopping": "1f6cd-fe0f", "school_satchel": "1f392", "thong_sandal": "1fa74", "mans_shoe": "1f45e", "shoe": "1f45e", "athletic_shoe": "1f45f", "hiking_boot": "1f97e", "womans_flat_shoe": "1f97f", "high_heel": "1f460", "sandal": "1f461", "ballet_shoes": "1fa70", "boot": "1f462", "crown": "1f451", "womans_hat": "1f452", "tophat": "1f3a9", "mortar_board": "1f393", "billed_cap": "1f9e2", "military_helmet": "1fa96", "helmet_with_white_cross": "26d1-fe0f", "rescue_worker_helmet": "26d1-fe0f", "prayer_beads": "1f4ff", "lipstick": "1f484", "ring": "1f48d", "gem": "1f48e", "mute": "1f507", "speaker": "1f508", "sound": "1f509", 
"loud_sound": "1f50a", "loudspeaker": "1f4e2", "mega": "1f4e3", "postal_horn": "1f4ef", "bell": "1f514", "no_bell": "1f515", "musical_score": "1f3bc", "musical_note": "1f3b5", "notes": "1f3b6", "studio_microphone": "1f399-fe0f", "level_slider": "1f39a-fe0f", "control_knobs": "1f39b-fe0f", "microphone": "1f3a4", "headphones": "1f3a7", "radio": "1f4fb", "saxophone": "1f3b7", "accordion": "1fa97", "guitar": "1f3b8", "musical_keyboard": "1f3b9", "trumpet": "1f3ba", "violin": "1f3bb", "banjo": "1fa95", "drum_with_drumsticks": "1f941", "drum": "1f941", "long_drum": "1fa98", "iphone": "1f4f1", "calling": "1f4f2", "phone": "260e-fe0f", "telephone": "260e-fe0f", "telephone_receiver": "1f4de", "pager": "1f4df", "fax": "1f4e0", "battery": "1f50b", "electric_plug": "1f50c", "computer": "1f4bb", "desktop_computer": "1f5a5-fe0f", "printer": "1f5a8-fe0f", "keyboard": "2328-fe0f", "three_button_mouse": "1f5b1-fe0f", "computer_mouse": "1f5b1-fe0f", "trackball": "1f5b2-fe0f", "minidisc": "1f4bd", "floppy_disk": "1f4be", "cd": "1f4bf", "dvd": "1f4c0", "abacus": "1f9ee", "movie_camera": "1f3a5", "film_frames": "1f39e-fe0f", "film_strip": "1f39e-fe0f", "film_projector": "1f4fd-fe0f", "clapper": "1f3ac", "tv": "1f4fa", "camera": "1f4f7", "camera_with_flash": "1f4f8", "camera_flash": "1f4f8", "video_camera": "1f4f9", "vhs": "1f4fc", "mag": "1f50d", "mag_right": "1f50e", "candle": "1f56f-fe0f", "bulb": "1f4a1", "flashlight": "1f526", "izakaya_lantern": "1f3ee", "lantern": "1f3ee", "diya_lamp": "1fa94", "notebook_with_decorative_cover": "1f4d4", "closed_book": "1f4d5", "book": "1f4d6", "open_book": "1f4d6", "green_book": "1f4d7", "blue_book": "1f4d8", "orange_book": "1f4d9", "books": "1f4da", "notebook": "1f4d3", "ledger": "1f4d2", "page_with_curl": "1f4c3", "scroll": "1f4dc", "page_facing_up": "1f4c4", "newspaper": "1f4f0", "rolled_up_newspaper": "1f5de-fe0f", "newspaper_roll": "1f5de-fe0f", "bookmark_tabs": "1f4d1", "bookmark": "1f516", "label": "1f3f7-fe0f", "moneybag": "1f4b0", "coin": "1fa99", "yen": "1f4b4", "dollar": "1f4b5", "euro": "1f4b6", "pound": "1f4b7", "money_with_wings": "1f4b8", "credit_card": "1f4b3", "receipt": "1f9fe", "chart": "1f4b9", "email": "2709-fe0f", "envelope": "2709-fe0f", "e-mail": "1f4e7", "incoming_envelope": "1f4e8", "envelope_with_arrow": "1f4e9", "outbox_tray": "1f4e4", "inbox_tray": "1f4e5", "package": "1f4e6", "mailbox": "1f4eb", "mailbox_closed": "1f4ea", "mailbox_with_mail": "1f4ec", "mailbox_with_no_mail": "1f4ed", "postbox": "1f4ee", "ballot_box_with_ballot": "1f5f3-fe0f", "ballot_box": "1f5f3-fe0f", "pencil2": "270f-fe0f", "black_nib": "2712-fe0f", "lower_left_fountain_pen": "1f58b-fe0f", "fountain_pen": "1f58b-fe0f", "lower_left_ballpoint_pen": "1f58a-fe0f", "pen": "1f58a-fe0f", "lower_left_paintbrush": "1f58c-fe0f", "paintbrush": "1f58c-fe0f", "lower_left_crayon": "1f58d-fe0f", "crayon": "1f58d-fe0f", "memo": "1f4dd", "pencil": "1f4dd", "briefcase": "1f4bc", "file_folder": "1f4c1", "open_file_folder": "1f4c2", "card_index_dividers": "1f5c2-fe0f", "date": "1f4c5", "calendar": "1f4c6", "spiral_note_pad": "1f5d2-fe0f", "spiral_notepad": "1f5d2-fe0f", "spiral_calendar_pad": "1f5d3-fe0f", "spiral_calendar": "1f5d3-fe0f", "card_index": "1f4c7", "chart_with_upwards_trend": "1f4c8", "chart_with_downwards_trend": "1f4c9", "bar_chart": "1f4ca", "clipboard": "1f4cb", "pushpin": "1f4cc", "round_pushpin": "1f4cd", "paperclip": "1f4ce", "linked_paperclips": "1f587-fe0f", "paperclips": "1f587-fe0f", "straight_ruler": "1f4cf", "triangular_ruler": "1f4d0", "scissors": "2702-fe0f", 
"card_file_box": "1f5c3-fe0f", "file_cabinet": "1f5c4-fe0f", "wastebasket": "1f5d1-fe0f", "lock": "1f512", "unlock": "1f513", "lock_with_ink_pen": "1f50f", "closed_lock_with_key": "1f510", "key": "1f511", "old_key": "1f5dd-fe0f", "hammer": "1f528", "axe": "1fa93", "pick": "26cf-fe0f", "hammer_and_pick": "2692-fe0f", "hammer_and_wrench": "1f6e0-fe0f", "dagger_knife": "1f5e1-fe0f", "dagger": "1f5e1-fe0f", "crossed_swords": "2694-fe0f", "gun": "1f52b", "boomerang": "1fa83", "bow_and_arrow": "1f3f9", "shield": "1f6e1-fe0f", "carpentry_saw": "1fa9a", "wrench": "1f527", "screwdriver": "1fa9b", "nut_and_bolt": "1f529", "gear": "2699-fe0f", "compression": "1f5dc-fe0f", "clamp": "1f5dc-fe0f", "scales": "2696-fe0f", "balance_scale": "2696-fe0f", "probing_cane": "1f9af", "link": "1f517", "chains": "26d3-fe0f", "hook": "1fa9d", "toolbox": "1f9f0", "magnet": "1f9f2", "ladder": "1fa9c", "alembic": "2697-fe0f", "test_tube": "1f9ea", "petri_dish": "1f9eb", "dna": "1f9ec", "microscope": "1f52c", "telescope": "1f52d", "satellite_antenna": "1f4e1", "syringe": "1f489", "drop_of_blood": "1fa78", "pill": "1f48a", "adhesive_bandage": "1fa79", "stethoscope": "1fa7a", "door": "1f6aa", "elevator": "1f6d7", "mirror": "1fa9e", "window": "1fa9f", "bed": "1f6cf-fe0f", "couch_and_lamp": "1f6cb-fe0f", "chair": "1fa91", "toilet": "1f6bd", "plunger": "1faa0", "shower": "1f6bf", "bathtub": "1f6c1", "mouse_trap": "1faa4", "razor": "1fa92", "lotion_bottle": "1f9f4", "safety_pin": "1f9f7", "broom": "1f9f9", "basket": "1f9fa", "roll_of_paper": "1f9fb", "bucket": "1faa3", "soap": "1f9fc", "toothbrush": "1faa5", "sponge": "1f9fd", "fire_extinguisher": "1f9ef", "shopping_trolley": "1f6d2", "shopping_cart": "1f6d2", "smoking": "1f6ac", "coffin": "26b0-fe0f", "headstone": "1faa6", "funeral_urn": "26b1-fe0f", "moyai": "1f5ff", "placard": "1faa7", "atm": "1f3e7", "put_litter_in_its_place": "1f6ae", "potable_water": "1f6b0", "wheelchair": "267f", "mens": "1f6b9", "womens": "1f6ba", "restroom": "1f6bb", "baby_symbol": "1f6bc", "wc": "1f6be", "passport_control": "1f6c2", "customs": "1f6c3", "baggage_claim": "1f6c4", "left_luggage": "1f6c5", "warning": "26a0-fe0f", "children_crossing": "1f6b8", "no_entry": "26d4", "no_entry_sign": "1f6ab", "no_bicycles": "1f6b3", "no_smoking": "1f6ad", "do_not_litter": "1f6af", "non-potable_water": "1f6b1", "no_pedestrians": "1f6b7", "no_mobile_phones": "1f4f5", "underage": "1f51e", "radioactive_sign": "2622-fe0f", "radioactive": "2622-fe0f", "biohazard_sign": "2623-fe0f", "biohazard": "2623-fe0f", "arrow_up": "2b06-fe0f", "arrow_upper_right": "2197-fe0f", "arrow_right": "27a1-fe0f", "arrow_lower_right": "2198-fe0f", "arrow_down": "2b07-fe0f", "arrow_lower_left": "2199-fe0f", "arrow_left": "2b05-fe0f", "arrow_upper_left": "2196-fe0f", "arrow_up_down": "2195-fe0f", "left_right_arrow": "2194-fe0f", "leftwards_arrow_with_hook": "21a9-fe0f", "arrow_right_hook": "21aa-fe0f", "arrow_heading_up": "2934-fe0f", "arrow_heading_down": "2935-fe0f", "arrows_clockwise": "1f503", "arrows_counterclockwise": "1f504", "back": "1f519", "end": "1f51a", "on": "1f51b", "soon": "1f51c", "top": "1f51d", "place_of_worship": "1f6d0", "atom_symbol": "269b-fe0f", "om_symbol": "1f549-fe0f", "om": "1f549-fe0f", "star_of_david": "2721-fe0f", "wheel_of_dharma": "2638-fe0f", "yin_yang": "262f-fe0f", "latin_cross": "271d-fe0f", "orthodox_cross": "2626-fe0f", "star_and_crescent": "262a-fe0f", "peace_symbol": "262e-fe0f", "menorah_with_nine_branches": "1f54e", "menorah": "1f54e", "six_pointed_star": "1f52f", "aries": "2648", "taurus": 
"2649", "gemini": "264a", "cancer": "264b", "leo": "264c", "virgo": "264d", "libra": "264e", "scorpius": "264f", "sagittarius": "2650", "capricorn": "2651", "aquarius": "2652", "pisces": "2653", "ophiuchus": "26ce", "twisted_rightwards_arrows": "1f500", "repeat": "1f501", "repeat_one": "1f502", "arrow_forward": "25b6-fe0f", "fast_forward": "23e9", "black_right_pointing_double_triangle_with_vertical_bar": "23ed-fe0f", "next_track_button": "23ed-fe0f", "black_right_pointing_triangle_with_double_vertical_bar": "23ef-fe0f", "play_or_pause_button": "23ef-fe0f", "arrow_backward": "25c0-fe0f", "rewind": "23ea", "black_left_pointing_double_triangle_with_vertical_bar": "23ee-fe0f", "previous_track_button": "23ee-fe0f", "arrow_up_small": "1f53c", "arrow_double_up": "23eb", "arrow_down_small": "1f53d", "arrow_double_down": "23ec", "double_vertical_bar": "23f8-fe0f", "pause_button": "23f8-fe0f", "black_square_for_stop": "23f9-fe0f", "stop_button": "23f9-fe0f", "black_circle_for_record": "23fa-fe0f", "record_button": "23fa-fe0f", "eject": "23cf-fe0f", "cinema": "1f3a6", "low_brightness": "1f505", "high_brightness": "1f506", "signal_strength": "1f4f6", "vibration_mode": "1f4f3", "mobile_phone_off": "1f4f4", "female_sign": "2640-fe0f", "male_sign": "2642-fe0f", "transgender_symbol": "26a7-fe0f", "heavy_multiplication_x": "2716-fe0f", "heavy_plus_sign": "2795", "heavy_minus_sign": "2796", "heavy_division_sign": "2797", "infinity": "267e-fe0f", "bangbang": "203c-fe0f", "interrobang": "2049-fe0f", "question": "2753", "grey_question": "2754", "grey_exclamation": "2755", "exclamation": "2757", "heavy_exclamation_mark": "2757", "wavy_dash": "3030-fe0f", "currency_exchange": "1f4b1", "heavy_dollar_sign": "1f4b2", "medical_symbol": "2695-fe0f", "staff_of_aesculapius": "2695-fe0f", "recycle": "267b-fe0f", "fleur_de_lis": "269c-fe0f", "trident": "1f531", "name_badge": "1f4db", "beginner": "1f530", "o": "2b55", "white_check_mark": "2705", "ballot_box_with_check": "2611-fe0f", "heavy_check_mark": "2714-fe0f", "x": "274c", "negative_squared_cross_mark": "274e", "curly_loop": "27b0", "loop": "27bf", "part_alternation_mark": "303d-fe0f", "eight_spoked_asterisk": "2733-fe0f", "eight_pointed_black_star": "2734-fe0f", "sparkle": "2747-fe0f", "copyright": "00a9-fe0f", "registered": "00ae-fe0f", "tm": "2122-fe0f", "hash": "0023-fe0f-20e3", "keycap_star": "002a-fe0f-20e3", "asterisk": "002a-fe0f-20e3", "zero": "0030-fe0f-20e3", "one": "0031-fe0f-20e3", "two": "0032-fe0f-20e3", "three": "0033-fe0f-20e3", "four": "0034-fe0f-20e3", "five": "0035-fe0f-20e3", "six": "0036-fe0f-20e3", "seven": "0037-fe0f-20e3", "eight": "0038-fe0f-20e3", "nine": "0039-fe0f-20e3", "keycap_ten": "1f51f", "capital_abcd": "1f520", "abcd": "1f521", "1234": "1f522", "symbols": "1f523", "abc": "1f524", "a": "1f170-fe0f", "ab": "1f18e", "b": "1f171-fe0f", "cl": "1f191", "cool": "1f192", "free": "1f193", "information_source": "2139-fe0f", "id": "1f194", "m": "24c2-fe0f", "new": "1f195", "ng": "1f196", "o2": "1f17e-fe0f", "ok": "1f197", "parking": "1f17f-fe0f", "sos": "1f198", "up": "1f199", "vs": "1f19a", "koko": "1f201", "sa": "1f202-fe0f", "u6708": "1f237-fe0f", "u6709": "1f236", "u6307": "1f22f", "ideograph_advantage": "1f250", "u5272": "1f239", "u7121": "1f21a", "u7981": "1f232", "accept": "1f251", "u7533": "1f238", "u5408": "1f234", "u7a7a": "1f233", "congratulations": "3297-fe0f", "secret": "3299-fe0f", "u55b6": "1f23a", "u6e80": "1f235", "red_circle": "1f534", "large_orange_circle": "1f7e0", "large_yellow_circle": "1f7e1", "large_green_circle": 
"1f7e2", "large_blue_circle": "1f535", "large_purple_circle": "1f7e3", "large_brown_circle": "1f7e4", "black_circle": "26ab", "white_circle": "26aa", "large_red_square": "1f7e5", "large_orange_square": "1f7e7", "large_yellow_square": "1f7e8", "large_green_square": "1f7e9", "large_blue_square": "1f7e6", "large_purple_square": "1f7ea", "large_brown_square": "1f7eb", "black_large_square": "2b1b", "white_large_square": "2b1c", "black_medium_square": "25fc-fe0f", "white_medium_square": "25fb-fe0f", "black_medium_small_square": "25fe", "white_medium_small_square": "25fd", "black_small_square": "25aa-fe0f", "white_small_square": "25ab-fe0f", "large_orange_diamond": "1f536", "large_blue_diamond": "1f537", "small_orange_diamond": "1f538", "small_blue_diamond": "1f539", "small_red_triangle": "1f53a", "small_red_triangle_down": "1f53b", "diamond_shape_with_a_dot_inside": "1f4a0", "radio_button": "1f518", "white_square_button": "1f533", "black_square_button": "1f532", "checkered_flag": "1f3c1", "triangular_flag_on_post": "1f6a9", "crossed_flags": "1f38c", "waving_black_flag": "1f3f4", "black_flag": "1f3f4", "waving_white_flag": "1f3f3-fe0f", "white_flag": "1f3f3-fe0f", "rainbow-flag": "1f3f3-fe0f-200d-1f308", "rainbow_flag": "1f3f3-fe0f-200d-1f308", "transgender_flag": "1f3f3-fe0f-200d-26a7-fe0f", "pirate_flag": "1f3f4-200d-2620-fe0f", "flag-ac": "1f1e6-1f1e8", "flag-ad": "1f1e6-1f1e9", "andorra": "1f1e6-1f1e9", "flag-ae": "1f1e6-1f1ea", "united_arab_emirates": "1f1e6-1f1ea", "flag-af": "1f1e6-1f1eb", "afghanistan": "1f1e6-1f1eb", "flag-ag": "1f1e6-1f1ec", "antigua_barbuda": "1f1e6-1f1ec", "flag-ai": "1f1e6-1f1ee", "anguilla": "1f1e6-1f1ee", "flag-al": "1f1e6-1f1f1", "albania": "1f1e6-1f1f1", "flag-am": "1f1e6-1f1f2", "armenia": "1f1e6-1f1f2", "flag-ao": "1f1e6-1f1f4", "angola": "1f1e6-1f1f4", "flag-aq": "1f1e6-1f1f6", "antarctica": "1f1e6-1f1f6", "flag-ar": "1f1e6-1f1f7", "argentina": "1f1e6-1f1f7", "flag-as": "1f1e6-1f1f8", "american_samoa": "1f1e6-1f1f8", "flag-at": "1f1e6-1f1f9", "austria": "1f1e6-1f1f9", "flag-au": "1f1e6-1f1fa", "australia": "1f1e6-1f1fa", "flag-aw": "1f1e6-1f1fc", "aruba": "1f1e6-1f1fc", "flag-ax": "1f1e6-1f1fd", "aland_islands": "1f1e6-1f1fd", "flag-az": "1f1e6-1f1ff", "azerbaijan": "1f1e6-1f1ff", "flag-ba": "1f1e7-1f1e6", "bosnia_herzegovina": "1f1e7-1f1e6", "flag-bb": "1f1e7-1f1e7", "barbados": "1f1e7-1f1e7", "flag-bd": "1f1e7-1f1e9", "bangladesh": "1f1e7-1f1e9", "flag-be": "1f1e7-1f1ea", "belgium": "1f1e7-1f1ea", "flag-bf": "1f1e7-1f1eb", "burkina_faso": "1f1e7-1f1eb", "flag-bg": "1f1e7-1f1ec", "bulgaria": "1f1e7-1f1ec", "flag-bh": "1f1e7-1f1ed", "bahrain": "1f1e7-1f1ed", "flag-bi": "1f1e7-1f1ee", "burundi": "1f1e7-1f1ee", "flag-bj": "1f1e7-1f1ef", "benin": "1f1e7-1f1ef", "flag-bl": "1f1e7-1f1f1", "st_barthelemy": "1f1e7-1f1f1", "flag-bm": "1f1e7-1f1f2", "bermuda": "1f1e7-1f1f2", "flag-bn": "1f1e7-1f1f3", "brunei": "1f1e7-1f1f3", "flag-bo": "1f1e7-1f1f4", "bolivia": "1f1e7-1f1f4", "flag-bq": "1f1e7-1f1f6", "caribbean_netherlands": "1f1e7-1f1f6", "flag-br": "1f1e7-1f1f7", "brazil": "1f1e7-1f1f7", "flag-bs": "1f1e7-1f1f8", "bahamas": "1f1e7-1f1f8", "flag-bt": "1f1e7-1f1f9", "bhutan": "1f1e7-1f1f9", "flag-bv": "1f1e7-1f1fb", "flag-bw": "1f1e7-1f1fc", "botswana": "1f1e7-1f1fc", "flag-by": "1f1e7-1f1fe", "belarus": "1f1e7-1f1fe", "flag-bz": "1f1e7-1f1ff", "belize": "1f1e7-1f1ff", "flag-ca": "1f1e8-1f1e6", "ca": "1f1e8-1f1e6", "canada": "1f1e8-1f1e6", "flag-cc": "1f1e8-1f1e8", "cocos_islands": "1f1e8-1f1e8", "flag-cd": "1f1e8-1f1e9", "congo_kinshasa": "1f1e8-1f1e9", "flag-cf": 
"1f1e8-1f1eb", "central_african_republic": "1f1e8-1f1eb", "flag-cg": "1f1e8-1f1ec", "congo_brazzaville": "1f1e8-1f1ec", "flag-ch": "1f1e8-1f1ed", "switzerland": "1f1e8-1f1ed", "flag-ci": "1f1e8-1f1ee", "cote_divoire": "1f1e8-1f1ee", "flag-ck": "1f1e8-1f1f0", "cook_islands": "1f1e8-1f1f0", "flag-cl": "1f1e8-1f1f1", "chile": "1f1e8-1f1f1", "flag-cm": "1f1e8-1f1f2", "cameroon": "1f1e8-1f1f2", "cn": "1f1e8-1f1f3", "flag-cn": "1f1e8-1f1f3", "flag-co": "1f1e8-1f1f4", "colombia": "1f1e8-1f1f4", "flag-cp": "1f1e8-1f1f5", "flag-cr": "1f1e8-1f1f7", "costa_rica": "1f1e8-1f1f7", "flag-cu": "1f1e8-1f1fa", "cuba": "1f1e8-1f1fa", "flag-cv": "1f1e8-1f1fb", "cape_verde": "1f1e8-1f1fb", "flag-cw": "1f1e8-1f1fc", "curacao": "1f1e8-1f1fc", "flag-cx": "1f1e8-1f1fd", "christmas_island": "1f1e8-1f1fd", "flag-cy": "1f1e8-1f1fe", "cyprus": "1f1e8-1f1fe", "flag-cz": "1f1e8-1f1ff", "czech_republic": "1f1e8-1f1ff", "de": "1f1e9-1f1ea", "flag-de": "1f1e9-1f1ea", "flag-dg": "1f1e9-1f1ec", "flag-dj": "1f1e9-1f1ef", "djibouti": "1f1e9-1f1ef", "flag-dk": "1f1e9-1f1f0", "denmark": "1f1e9-1f1f0", "flag-dm": "1f1e9-1f1f2", "dominica": "1f1e9-1f1f2", "flag-do": "1f1e9-1f1f4", "dominican_republic": "1f1e9-1f1f4", "flag-dz": "1f1e9-1f1ff", "algeria": "1f1e9-1f1ff", "flag-ea": "1f1ea-1f1e6", "flag-ec": "1f1ea-1f1e8", "ecuador": "1f1ea-1f1e8", "flag-ee": "1f1ea-1f1ea", "estonia": "1f1ea-1f1ea", "flag-eg": "1f1ea-1f1ec", "egypt": "1f1ea-1f1ec", "flag-eh": "1f1ea-1f1ed", "western_sahara": "1f1ea-1f1ed", "flag-er": "1f1ea-1f1f7", "eritrea": "1f1ea-1f1f7", "es": "1f1ea-1f1f8", "flag-es": "1f1ea-1f1f8", "flag-et": "1f1ea-1f1f9", "ethiopia": "1f1ea-1f1f9", "flag-eu": "1f1ea-1f1fa", "eu": "1f1ea-1f1fa", "european_union": "1f1ea-1f1fa", "flag-fi": "1f1eb-1f1ee", "finland": "1f1eb-1f1ee", "flag-fj": "1f1eb-1f1ef", "fiji": "1f1eb-1f1ef", "flag-fk": "1f1eb-1f1f0", "falkland_islands": "1f1eb-1f1f0", "flag-fm": "1f1eb-1f1f2", "micronesia": "1f1eb-1f1f2", "flag-fo": "1f1eb-1f1f4", "faroe_islands": "1f1eb-1f1f4", "fr": "1f1eb-1f1f7", "flag-fr": "1f1eb-1f1f7", "flag-ga": "1f1ec-1f1e6", "gabon": "1f1ec-1f1e6", "gb": "1f1ec-1f1e7", "uk": "1f1ec-1f1e7", "flag-gb": "1f1ec-1f1e7", "flag-gd": "1f1ec-1f1e9", "grenada": "1f1ec-1f1e9", "flag-ge": "1f1ec-1f1ea", "georgia": "1f1ec-1f1ea", "flag-gf": "1f1ec-1f1eb", "french_guiana": "1f1ec-1f1eb", "flag-gg": "1f1ec-1f1ec", "guernsey": "1f1ec-1f1ec", "flag-gh": "1f1ec-1f1ed", "ghana": "1f1ec-1f1ed", "flag-gi": "1f1ec-1f1ee", "gibraltar": "1f1ec-1f1ee", "flag-gl": "1f1ec-1f1f1", "greenland": "1f1ec-1f1f1", "flag-gm": "1f1ec-1f1f2", "gambia": "1f1ec-1f1f2", "flag-gn": "1f1ec-1f1f3", "guinea": "1f1ec-1f1f3", "flag-gp": "1f1ec-1f1f5", "guadeloupe": "1f1ec-1f1f5", "flag-gq": "1f1ec-1f1f6", "equatorial_guinea": "1f1ec-1f1f6", "flag-gr": "1f1ec-1f1f7", "greece": "1f1ec-1f1f7", "flag-gs": "1f1ec-1f1f8", "south_georgia_south_sandwich_islands": "1f1ec-1f1f8", "flag-gt": "1f1ec-1f1f9", "guatemala": "1f1ec-1f1f9", "flag-gu": "1f1ec-1f1fa", "guam": "1f1ec-1f1fa", "flag-gw": "1f1ec-1f1fc", "guinea_bissau": "1f1ec-1f1fc", "flag-gy": "1f1ec-1f1fe", "guyana": "1f1ec-1f1fe", "flag-hk": "1f1ed-1f1f0", "hong_kong": "1f1ed-1f1f0", "flag-hm": "1f1ed-1f1f2", "flag-hn": "1f1ed-1f1f3", "honduras": "1f1ed-1f1f3", "flag-hr": "1f1ed-1f1f7", "croatia": "1f1ed-1f1f7", "flag-ht": "1f1ed-1f1f9", "haiti": "1f1ed-1f1f9", "flag-hu": "1f1ed-1f1fa", "hungary": "1f1ed-1f1fa", "flag-ic": "1f1ee-1f1e8", "canary_islands": "1f1ee-1f1e8", "flag-id": "1f1ee-1f1e9", "indonesia": "1f1ee-1f1e9", "flag-ie": "1f1ee-1f1ea", "ireland": "1f1ee-1f1ea", 
"flag-il": "1f1ee-1f1f1", "israel": "1f1ee-1f1f1", "flag-im": "1f1ee-1f1f2", "isle_of_man": "1f1ee-1f1f2", "flag-in": "1f1ee-1f1f3", "india": "1f1ee-1f1f3", "flag-io": "1f1ee-1f1f4", "british_indian_ocean_territory": "1f1ee-1f1f4", "flag-iq": "1f1ee-1f1f6", "iraq": "1f1ee-1f1f6", "flag-ir": "1f1ee-1f1f7", "iran": "1f1ee-1f1f7", "flag-is": "1f1ee-1f1f8", "iceland": "1f1ee-1f1f8", "it": "1f1ee-1f1f9", "flag-it": "1f1ee-1f1f9", "flag-je": "1f1ef-1f1ea", "jersey": "1f1ef-1f1ea", "flag-jm": "1f1ef-1f1f2", "jamaica": "1f1ef-1f1f2", "flag-jo": "1f1ef-1f1f4", "jordan": "1f1ef-1f1f4", "jp": "1f1ef-1f1f5", "flag-jp": "1f1ef-1f1f5", "flag-ke": "1f1f0-1f1ea", "kenya": "1f1f0-1f1ea", "flag-kg": "1f1f0-1f1ec", "kyrgyzstan": "1f1f0-1f1ec", "flag-kh": "1f1f0-1f1ed", "cambodia": "1f1f0-1f1ed", "flag-ki": "1f1f0-1f1ee", "kiribati": "1f1f0-1f1ee", "flag-km": "1f1f0-1f1f2", "comoros": "1f1f0-1f1f2", "flag-kn": "1f1f0-1f1f3", "st_kitts_nevis": "1f1f0-1f1f3", "flag-kp": "1f1f0-1f1f5", "north_korea": "1f1f0-1f1f5", "kr": "1f1f0-1f1f7", "flag-kr": "1f1f0-1f1f7", "flag-kw": "1f1f0-1f1fc", "kuwait": "1f1f0-1f1fc", "flag-ky": "1f1f0-1f1fe", "cayman_islands": "1f1f0-1f1fe", "flag-kz": "1f1f0-1f1ff", "kazakhstan": "1f1f0-1f1ff", "flag-la": "1f1f1-1f1e6", "laos": "1f1f1-1f1e6", "flag-lb": "1f1f1-1f1e7", "lebanon": "1f1f1-1f1e7", "flag-lc": "1f1f1-1f1e8", "st_lucia": "1f1f1-1f1e8", "flag-li": "1f1f1-1f1ee", "liechtenstein": "1f1f1-1f1ee", "flag-lk": "1f1f1-1f1f0", "sri_lanka": "1f1f1-1f1f0", "flag-lr": "1f1f1-1f1f7", "liberia": "1f1f1-1f1f7", "flag-ls": "1f1f1-1f1f8", "lesotho": "1f1f1-1f1f8", "flag-lt": "1f1f1-1f1f9", "lithuania": "1f1f1-1f1f9", "flag-lu": "1f1f1-1f1fa", "luxembourg": "1f1f1-1f1fa", "flag-lv": "1f1f1-1f1fb", "latvia": "1f1f1-1f1fb", "flag-ly": "1f1f1-1f1fe", "libya": "1f1f1-1f1fe", "flag-ma": "1f1f2-1f1e6", "morocco": "1f1f2-1f1e6", "flag-mc": "1f1f2-1f1e8", "monaco": "1f1f2-1f1e8", "flag-md": "1f1f2-1f1e9", "moldova": "1f1f2-1f1e9", "flag-me": "1f1f2-1f1ea", "montenegro": "1f1f2-1f1ea", "flag-mf": "1f1f2-1f1eb", "flag-mg": "1f1f2-1f1ec", "madagascar": "1f1f2-1f1ec", "flag-mh": "1f1f2-1f1ed", "marshall_islands": "1f1f2-1f1ed", "flag-mk": "1f1f2-1f1f0", "macedonia": "1f1f2-1f1f0", "flag-ml": "1f1f2-1f1f1", "mali": "1f1f2-1f1f1", "flag-mm": "1f1f2-1f1f2", "myanmar": "1f1f2-1f1f2", "flag-mn": "1f1f2-1f1f3", "mongolia": "1f1f2-1f1f3", "flag-mo": "1f1f2-1f1f4", "macau": "1f1f2-1f1f4", "flag-mp": "1f1f2-1f1f5", "northern_mariana_islands": "1f1f2-1f1f5", "flag-mq": "1f1f2-1f1f6", "martinique": "1f1f2-1f1f6", "flag-mr": "1f1f2-1f1f7", "mauritania": "1f1f2-1f1f7", "flag-ms": "1f1f2-1f1f8", "montserrat": "1f1f2-1f1f8", "flag-mt": "1f1f2-1f1f9", "malta": "1f1f2-1f1f9", "flag-mu": "1f1f2-1f1fa", "mauritius": "1f1f2-1f1fa", "flag-mv": "1f1f2-1f1fb", "maldives": "1f1f2-1f1fb", "flag-mw": "1f1f2-1f1fc", "malawi": "1f1f2-1f1fc", "flag-mx": "1f1f2-1f1fd", "mexico": "1f1f2-1f1fd", "flag-my": "1f1f2-1f1fe", "malaysia": "1f1f2-1f1fe", "flag-mz": "1f1f2-1f1ff", "mozambique": "1f1f2-1f1ff", "flag-na": "1f1f3-1f1e6", "namibia": "1f1f3-1f1e6", "flag-nc": "1f1f3-1f1e8", "new_caledonia": "1f1f3-1f1e8", "flag-ne": "1f1f3-1f1ea", "niger": "1f1f3-1f1ea", "flag-nf": "1f1f3-1f1eb", "norfolk_island": "1f1f3-1f1eb", "flag-ng": "1f1f3-1f1ec", "nigeria": "1f1f3-1f1ec", "flag-ni": "1f1f3-1f1ee", "nicaragua": "1f1f3-1f1ee", "flag-nl": "1f1f3-1f1f1", "netherlands": "1f1f3-1f1f1", "flag-no": "1f1f3-1f1f4", "norway": "1f1f3-1f1f4", "flag-np": "1f1f3-1f1f5", "nepal": "1f1f3-1f1f5", "flag-nr": "1f1f3-1f1f7", "nauru": "1f1f3-1f1f7", "flag-nu": 
"1f1f3-1f1fa", "niue": "1f1f3-1f1fa", "flag-nz": "1f1f3-1f1ff", "new_zealand": "1f1f3-1f1ff", "flag-om": "1f1f4-1f1f2", "oman": "1f1f4-1f1f2", "flag-pa": "1f1f5-1f1e6", "panama": "1f1f5-1f1e6", "flag-pe": "1f1f5-1f1ea", "peru": "1f1f5-1f1ea", "flag-pf": "1f1f5-1f1eb", "french_polynesia": "1f1f5-1f1eb", "flag-pg": "1f1f5-1f1ec", "papua_new_guinea": "1f1f5-1f1ec", "flag-ph": "1f1f5-1f1ed", "philippines": "1f1f5-1f1ed", "flag-pk": "1f1f5-1f1f0", "pakistan": "1f1f5-1f1f0", "pk": "1f1f5-1f1f0", "flag-pl": "1f1f5-1f1f1", "poland": "1f1f5-1f1f1", "flag-pm": "1f1f5-1f1f2", "st_pierre_miquelon": "1f1f5-1f1f2", "flag-pn": "1f1f5-1f1f3", "pitcairn_islands": "1f1f5-1f1f3", "flag-pr": "1f1f5-1f1f7", "puerto_rico": "1f1f5-1f1f7", "flag-ps": "1f1f5-1f1f8", "palestinian_territories": "1f1f5-1f1f8", "flag-pt": "1f1f5-1f1f9", "portugal": "1f1f5-1f1f9", "flag-pw": "1f1f5-1f1fc", "palau": "1f1f5-1f1fc", "flag-py": "1f1f5-1f1fe", "paraguay": "1f1f5-1f1fe", "flag-qa": "1f1f6-1f1e6", "qatar": "1f1f6-1f1e6", "flag-re": "1f1f7-1f1ea", "reunion": "1f1f7-1f1ea", "flag-ro": "1f1f7-1f1f4", "romania": "1f1f7-1f1f4", "flag-rs": "1f1f7-1f1f8", "serbia": "1f1f7-1f1f8", "ru": "1f1f7-1f1fa", "flag-ru": "1f1f7-1f1fa", "flag-rw": "1f1f7-1f1fc", "rwanda": "1f1f7-1f1fc", "flag-sa": "1f1f8-1f1e6", "saudi_arabia": "1f1f8-1f1e6", "flag-sb": "1f1f8-1f1e7", "solomon_islands": "1f1f8-1f1e7", "flag-sc": "1f1f8-1f1e8", "seychelles": "1f1f8-1f1e8", "flag-sd": "1f1f8-1f1e9", "sudan": "1f1f8-1f1e9", "flag-se": "1f1f8-1f1ea", "sweden": "1f1f8-1f1ea", "flag-sg": "1f1f8-1f1ec", "singapore": "1f1f8-1f1ec", "flag-sh": "1f1f8-1f1ed", "st_helena": "1f1f8-1f1ed", "flag-si": "1f1f8-1f1ee", "slovenia": "1f1f8-1f1ee", "flag-sj": "1f1f8-1f1ef", "flag-sk": "1f1f8-1f1f0", "slovakia": "1f1f8-1f1f0", "flag-sl": "1f1f8-1f1f1", "sierra_leone": "1f1f8-1f1f1", "flag-sm": "1f1f8-1f1f2", "san_marino": "1f1f8-1f1f2", "flag-sn": "1f1f8-1f1f3", "senegal": "1f1f8-1f1f3", "flag-so": "1f1f8-1f1f4", "somalia": "1f1f8-1f1f4", "flag-sr": "1f1f8-1f1f7", "suriname": "1f1f8-1f1f7", "flag-ss": "1f1f8-1f1f8", "south_sudan": "1f1f8-1f1f8", "flag-st": "1f1f8-1f1f9", "sao_tome_principe": "1f1f8-1f1f9", "flag-sv": "1f1f8-1f1fb", "el_salvador": "1f1f8-1f1fb", "flag-sx": "1f1f8-1f1fd", "sint_maarten": "1f1f8-1f1fd", "flag-sy": "1f1f8-1f1fe", "syria": "1f1f8-1f1fe", "flag-sz": "1f1f8-1f1ff", "swaziland": "1f1f8-1f1ff", "flag-ta": "1f1f9-1f1e6", "flag-tc": "1f1f9-1f1e8", "turks_caicos_islands": "1f1f9-1f1e8", "flag-td": "1f1f9-1f1e9", "chad": "1f1f9-1f1e9", "flag-tf": "1f1f9-1f1eb", "french_southern_territories": "1f1f9-1f1eb", "flag-tg": "1f1f9-1f1ec", "togo": "1f1f9-1f1ec", "flag-th": "1f1f9-1f1ed", "thailand": "1f1f9-1f1ed", "flag-tj": "1f1f9-1f1ef", "tajikistan": "1f1f9-1f1ef", "flag-tk": "1f1f9-1f1f0", "tokelau": "1f1f9-1f1f0", "flag-tl": "1f1f9-1f1f1", "timor_leste": "1f1f9-1f1f1", "flag-tm": "1f1f9-1f1f2", "turkmenistan": "1f1f9-1f1f2", "flag-tn": "1f1f9-1f1f3", "tunisia": "1f1f9-1f1f3", "flag-to": "1f1f9-1f1f4", "tonga": "1f1f9-1f1f4", "flag-tr": "1f1f9-1f1f7", "tr": "1f1f9-1f1f7", "flag-tt": "1f1f9-1f1f9", "trinidad_tobago": "1f1f9-1f1f9", "flag-tv": "1f1f9-1f1fb", "tuvalu": "1f1f9-1f1fb", "flag-tw": "1f1f9-1f1fc", "taiwan": "1f1f9-1f1fc", "flag-tz": "1f1f9-1f1ff", "tanzania": "1f1f9-1f1ff", "flag-ua": "1f1fa-1f1e6", "ukraine": "1f1fa-1f1e6", "flag-ug": "1f1fa-1f1ec", "uganda": "1f1fa-1f1ec", "flag-um": "1f1fa-1f1f2", "flag-un": "1f1fa-1f1f3", "us": "1f1fa-1f1f8", "flag-us": "1f1fa-1f1f8", "flag-uy": "1f1fa-1f1fe", "uruguay": "1f1fa-1f1fe", "flag-uz": "1f1fa-1f1ff", 
"uzbekistan": "1f1fa-1f1ff", "flag-va": "1f1fb-1f1e6", "vatican_city": "1f1fb-1f1e6", "flag-vc": "1f1fb-1f1e8", "st_vincent_grenadines": "1f1fb-1f1e8", "flag-ve": "1f1fb-1f1ea", "venezuela": "1f1fb-1f1ea", "flag-vg": "1f1fb-1f1ec", "british_virgin_islands": "1f1fb-1f1ec", "flag-vi": "1f1fb-1f1ee", "us_virgin_islands": "1f1fb-1f1ee", "flag-vn": "1f1fb-1f1f3", "vietnam": "1f1fb-1f1f3", "flag-vu": "1f1fb-1f1fa", "vanuatu": "1f1fb-1f1fa", "flag-wf": "1f1fc-1f1eb", "wallis_futuna": "1f1fc-1f1eb", "flag-ws": "1f1fc-1f1f8", "samoa": "1f1fc-1f1f8", "flag-xk": "1f1fd-1f1f0", "kosovo": "1f1fd-1f1f0", "flag-ye": "1f1fe-1f1ea", "yemen": "1f1fe-1f1ea", "flag-yt": "1f1fe-1f1f9", "mayotte": "1f1fe-1f1f9", "flag-za": "1f1ff-1f1e6", "south_africa": "1f1ff-1f1e6", "za": "1f1ff-1f1e6", "flag-zm": "1f1ff-1f1f2", "zambia": "1f1ff-1f1f2", "flag-zw": "1f1ff-1f1fc", "zimbabwe": "1f1ff-1f1fc", "flag-england": "1f3f4-e0067-e0062-e0065-e006e-e0067-e007f", "flag-scotland": "1f3f4-e0067-e0062-e0073-e0063-e0074-e007f", "flag-wales": "1f3f4-e0067-e0062-e0077-e006c-e0073-e007f", "santa_light_skin_tone": "1f385-1f3fb", "santa_medium_light_skin_tone": "1f385-1f3fc", "santa_medium_skin_tone": "1f385-1f3fd", "santa_medium_dark_skin_tone": "1f385-1f3fe", "santa_dark_skin_tone": "1f385-1f3ff", "snowboarder_light_skin_tone": "1f3c2-1f3fb", "snowboarder_medium_light_skin_tone": "1f3c2-1f3fc", "snowboarder_medium_skin_tone": "1f3c2-1f3fd", "snowboarder_medium_dark_skin_tone": "1f3c2-1f3fe", "snowboarder_dark_skin_tone": "1f3c2-1f3ff", "woman-running_light_skin_tone": "1f3c3-1f3fb-200d-2640-fe0f", "running_woman_light_skin_tone": "1f3c3-1f3fb-200d-2640-fe0f", "woman-running_medium_light_skin_tone": "1f3c3-1f3fc-200d-2640-fe0f", "running_woman_medium_light_skin_tone": "1f3c3-1f3fc-200d-2640-fe0f", "woman-running_medium_skin_tone": "1f3c3-1f3fd-200d-2640-fe0f", "running_woman_medium_skin_tone": "1f3c3-1f3fd-200d-2640-fe0f", "woman-running_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2640-fe0f", "running_woman_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2640-fe0f", "woman-running_dark_skin_tone": "1f3c3-1f3ff-200d-2640-fe0f", "running_woman_dark_skin_tone": "1f3c3-1f3ff-200d-2640-fe0f", "man-running_light_skin_tone": "1f3c3-1f3fb-200d-2642-fe0f", "running_man_light_skin_tone": "1f3c3-1f3fb-200d-2642-fe0f", "man-running_medium_light_skin_tone": "1f3c3-1f3fc-200d-2642-fe0f", "running_man_medium_light_skin_tone": "1f3c3-1f3fc-200d-2642-fe0f", "man-running_medium_skin_tone": "1f3c3-1f3fd-200d-2642-fe0f", "running_man_medium_skin_tone": "1f3c3-1f3fd-200d-2642-fe0f", "man-running_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2642-fe0f", "running_man_medium_dark_skin_tone": "1f3c3-1f3fe-200d-2642-fe0f", "man-running_dark_skin_tone": "1f3c3-1f3ff-200d-2642-fe0f", "running_man_dark_skin_tone": "1f3c3-1f3ff-200d-2642-fe0f", "runner_light_skin_tone": "1f3c3-1f3fb", "running_light_skin_tone": "1f3c3-1f3fb", "runner_medium_light_skin_tone": "1f3c3-1f3fc", "running_medium_light_skin_tone": "1f3c3-1f3fc", "runner_medium_skin_tone": "1f3c3-1f3fd", "running_medium_skin_tone": "1f3c3-1f3fd", "runner_medium_dark_skin_tone": "1f3c3-1f3fe", "running_medium_dark_skin_tone": "1f3c3-1f3fe", "runner_dark_skin_tone": "1f3c3-1f3ff", "running_dark_skin_tone": "1f3c3-1f3ff", "woman-surfing_light_skin_tone": "1f3c4-1f3fb-200d-2640-fe0f", "surfing_woman_light_skin_tone": "1f3c4-1f3fb-200d-2640-fe0f", "woman-surfing_medium_light_skin_tone": "1f3c4-1f3fc-200d-2640-fe0f", "surfing_woman_medium_light_skin_tone": "1f3c4-1f3fc-200d-2640-fe0f", "woman-surfing_medium_skin_tone": 
"1f3c4-1f3fd-200d-2640-fe0f", "surfing_woman_medium_skin_tone": "1f3c4-1f3fd-200d-2640-fe0f", "woman-surfing_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2640-fe0f", "surfing_woman_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2640-fe0f", "woman-surfing_dark_skin_tone": "1f3c4-1f3ff-200d-2640-fe0f", "surfing_woman_dark_skin_tone": "1f3c4-1f3ff-200d-2640-fe0f", "man-surfing_light_skin_tone": "1f3c4-1f3fb-200d-2642-fe0f", "surfing_man_light_skin_tone": "1f3c4-1f3fb-200d-2642-fe0f", "man-surfing_medium_light_skin_tone": "1f3c4-1f3fc-200d-2642-fe0f", "surfing_man_medium_light_skin_tone": "1f3c4-1f3fc-200d-2642-fe0f", "man-surfing_medium_skin_tone": "1f3c4-1f3fd-200d-2642-fe0f", "surfing_man_medium_skin_tone": "1f3c4-1f3fd-200d-2642-fe0f", "man-surfing_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2642-fe0f", "surfing_man_medium_dark_skin_tone": "1f3c4-1f3fe-200d-2642-fe0f", "man-surfing_dark_skin_tone": "1f3c4-1f3ff-200d-2642-fe0f", "surfing_man_dark_skin_tone": "1f3c4-1f3ff-200d-2642-fe0f", "surfer_light_skin_tone": "1f3c4-1f3fb", "surfer_medium_light_skin_tone": "1f3c4-1f3fc", "surfer_medium_skin_tone": "1f3c4-1f3fd", "surfer_medium_dark_skin_tone": "1f3c4-1f3fe", "surfer_dark_skin_tone": "1f3c4-1f3ff", "horse_racing_light_skin_tone": "1f3c7-1f3fb", "horse_racing_medium_light_skin_tone": "1f3c7-1f3fc", "horse_racing_medium_skin_tone": "1f3c7-1f3fd", "horse_racing_medium_dark_skin_tone": "1f3c7-1f3fe", "horse_racing_dark_skin_tone": "1f3c7-1f3ff", "woman-swimming_light_skin_tone": "1f3ca-1f3fb-200d-2640-fe0f", "swimming_woman_light_skin_tone": "1f3ca-1f3fb-200d-2640-fe0f", "woman-swimming_medium_light_skin_tone": "1f3ca-1f3fc-200d-2640-fe0f", "swimming_woman_medium_light_skin_tone": "1f3ca-1f3fc-200d-2640-fe0f", "woman-swimming_medium_skin_tone": "1f3ca-1f3fd-200d-2640-fe0f", "swimming_woman_medium_skin_tone": "1f3ca-1f3fd-200d-2640-fe0f", "woman-swimming_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2640-fe0f", "swimming_woman_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2640-fe0f", "woman-swimming_dark_skin_tone": "1f3ca-1f3ff-200d-2640-fe0f", "swimming_woman_dark_skin_tone": "1f3ca-1f3ff-200d-2640-fe0f", "man-swimming_light_skin_tone": "1f3ca-1f3fb-200d-2642-fe0f", "swimming_man_light_skin_tone": "1f3ca-1f3fb-200d-2642-fe0f", "man-swimming_medium_light_skin_tone": "1f3ca-1f3fc-200d-2642-fe0f", "swimming_man_medium_light_skin_tone": "1f3ca-1f3fc-200d-2642-fe0f", "man-swimming_medium_skin_tone": "1f3ca-1f3fd-200d-2642-fe0f", "swimming_man_medium_skin_tone": "1f3ca-1f3fd-200d-2642-fe0f", "man-swimming_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2642-fe0f", "swimming_man_medium_dark_skin_tone": "1f3ca-1f3fe-200d-2642-fe0f", "man-swimming_dark_skin_tone": "1f3ca-1f3ff-200d-2642-fe0f", "swimming_man_dark_skin_tone": "1f3ca-1f3ff-200d-2642-fe0f", "swimmer_light_skin_tone": "1f3ca-1f3fb", "swimmer_medium_light_skin_tone": "1f3ca-1f3fc", "swimmer_medium_skin_tone": "1f3ca-1f3fd", "swimmer_medium_dark_skin_tone": "1f3ca-1f3fe", "swimmer_dark_skin_tone": "1f3ca-1f3ff", "woman-lifting-weights_light_skin_tone": "1f3cb-1f3fb-200d-2640-fe0f", "weight_lifting_woman_light_skin_tone": "1f3cb-1f3fb-200d-2640-fe0f", "woman-lifting-weights_medium_light_skin_tone": "1f3cb-1f3fc-200d-2640-fe0f", "weight_lifting_woman_medium_light_skin_tone": "1f3cb-1f3fc-200d-2640-fe0f", "woman-lifting-weights_medium_skin_tone": "1f3cb-1f3fd-200d-2640-fe0f", "weight_lifting_woman_medium_skin_tone": "1f3cb-1f3fd-200d-2640-fe0f", "woman-lifting-weights_medium_dark_skin_tone": "1f3cb-1f3fe-200d-2640-fe0f", "weight_lifting_woman_medium_dark_skin_tone": 
"1f3cb-1f3fe-200d-2640-fe0f", "woman-lifting-weights_dark_skin_tone": "1f3cb-1f3ff-200d-2640-fe0f", "weight_lifting_woman_dark_skin_tone": "1f3cb-1f3ff-200d-2640-fe0f", "man-lifting-weights_light_skin_tone": "1f3cb-1f3fb-200d-2642-fe0f", "weight_lifting_man_light_skin_tone": "1f3cb-1f3fb-200d-2642-fe0f", "man-lifting-weights_medium_light_skin_tone": "1f3cb-1f3fc-200d-2642-fe0f", "weight_lifting_man_medium_light_skin_tone": "1f3cb-1f3fc-200d-2642-fe0f", "man-lifting-weights_medium_skin_tone": "1f3cb-1f3fd-200d-2642-fe0f", "weight_lifting_man_medium_skin_tone": "1f3cb-1f3fd-200d-2642-fe0f", "man-lifting-weights_medium_dark_skin_tone": "1f3cb-1f3fe-200d-2642-fe0f", "weight_lifting_man_medium_dark_skin_tone": "1f3cb-1f3fe-200d-2642-fe0f", "man-lifting-weights_dark_skin_tone": "1f3cb-1f3ff-200d-2642-fe0f", "weight_lifting_man_dark_skin_tone": "1f3cb-1f3ff-200d-2642-fe0f", "weight_lifter_light_skin_tone": "1f3cb-1f3fb", "weight_lifter_medium_light_skin_tone": "1f3cb-1f3fc", "weight_lifter_medium_skin_tone": "1f3cb-1f3fd", "weight_lifter_medium_dark_skin_tone": "1f3cb-1f3fe", "weight_lifter_dark_skin_tone": "1f3cb-1f3ff", "woman-golfing_light_skin_tone": "1f3cc-1f3fb-200d-2640-fe0f", "golfing_woman_light_skin_tone": "1f3cc-1f3fb-200d-2640-fe0f", "woman-golfing_medium_light_skin_tone": "1f3cc-1f3fc-200d-2640-fe0f", "golfing_woman_medium_light_skin_tone": "1f3cc-1f3fc-200d-2640-fe0f", "woman-golfing_medium_skin_tone": "1f3cc-1f3fd-200d-2640-fe0f", "golfing_woman_medium_skin_tone": "1f3cc-1f3fd-200d-2640-fe0f", "woman-golfing_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2640-fe0f", "golfing_woman_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2640-fe0f", "woman-golfing_dark_skin_tone": "1f3cc-1f3ff-200d-2640-fe0f", "golfing_woman_dark_skin_tone": "1f3cc-1f3ff-200d-2640-fe0f", "man-golfing_light_skin_tone": "1f3cc-1f3fb-200d-2642-fe0f", "golfing_man_light_skin_tone": "1f3cc-1f3fb-200d-2642-fe0f", "man-golfing_medium_light_skin_tone": "1f3cc-1f3fc-200d-2642-fe0f", "golfing_man_medium_light_skin_tone": "1f3cc-1f3fc-200d-2642-fe0f", "man-golfing_medium_skin_tone": "1f3cc-1f3fd-200d-2642-fe0f", "golfing_man_medium_skin_tone": "1f3cc-1f3fd-200d-2642-fe0f", "man-golfing_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2642-fe0f", "golfing_man_medium_dark_skin_tone": "1f3cc-1f3fe-200d-2642-fe0f", "man-golfing_dark_skin_tone": "1f3cc-1f3ff-200d-2642-fe0f", "golfing_man_dark_skin_tone": "1f3cc-1f3ff-200d-2642-fe0f", "golfer_light_skin_tone": "1f3cc-1f3fb", "golfer_medium_light_skin_tone": "1f3cc-1f3fc", "golfer_medium_skin_tone": "1f3cc-1f3fd", "golfer_medium_dark_skin_tone": "1f3cc-1f3fe", "golfer_dark_skin_tone": "1f3cc-1f3ff", "ear_light_skin_tone": "1f442-1f3fb", "ear_medium_light_skin_tone": "1f442-1f3fc", "ear_medium_skin_tone": "1f442-1f3fd", "ear_medium_dark_skin_tone": "1f442-1f3fe", "ear_dark_skin_tone": "1f442-1f3ff", "nose_light_skin_tone": "1f443-1f3fb", "nose_medium_light_skin_tone": "1f443-1f3fc", "nose_medium_skin_tone": "1f443-1f3fd", "nose_medium_dark_skin_tone": "1f443-1f3fe", "nose_dark_skin_tone": "1f443-1f3ff", "point_up_2_light_skin_tone": "1f446-1f3fb", "point_up_2_medium_light_skin_tone": "1f446-1f3fc", "point_up_2_medium_skin_tone": "1f446-1f3fd", "point_up_2_medium_dark_skin_tone": "1f446-1f3fe", "point_up_2_dark_skin_tone": "1f446-1f3ff", "point_down_light_skin_tone": "1f447-1f3fb", "point_down_medium_light_skin_tone": "1f447-1f3fc", "point_down_medium_skin_tone": "1f447-1f3fd", "point_down_medium_dark_skin_tone": "1f447-1f3fe", "point_down_dark_skin_tone": "1f447-1f3ff", 
"point_left_light_skin_tone": "1f448-1f3fb", "point_left_medium_light_skin_tone": "1f448-1f3fc", "point_left_medium_skin_tone": "1f448-1f3fd", "point_left_medium_dark_skin_tone": "1f448-1f3fe", "point_left_dark_skin_tone": "1f448-1f3ff", "point_right_light_skin_tone": "1f449-1f3fb", "point_right_medium_light_skin_tone": "1f449-1f3fc", "point_right_medium_skin_tone": "1f449-1f3fd", "point_right_medium_dark_skin_tone": "1f449-1f3fe", "point_right_dark_skin_tone": "1f449-1f3ff", "facepunch_light_skin_tone": "1f44a-1f3fb", "punch_light_skin_tone": "1f44a-1f3fb", "fist_oncoming_light_skin_tone": "1f44a-1f3fb", "facepunch_medium_light_skin_tone": "1f44a-1f3fc", "punch_medium_light_skin_tone": "1f44a-1f3fc", "fist_oncoming_medium_light_skin_tone": "1f44a-1f3fc", "facepunch_medium_skin_tone": "1f44a-1f3fd", "punch_medium_skin_tone": "1f44a-1f3fd", "fist_oncoming_medium_skin_tone": "1f44a-1f3fd", "facepunch_medium_dark_skin_tone": "1f44a-1f3fe", "punch_medium_dark_skin_tone": "1f44a-1f3fe", "fist_oncoming_medium_dark_skin_tone": "1f44a-1f3fe", "facepunch_dark_skin_tone": "1f44a-1f3ff", "punch_dark_skin_tone": "1f44a-1f3ff", "fist_oncoming_dark_skin_tone": "1f44a-1f3ff", "wave_light_skin_tone": "1f44b-1f3fb", "wave_medium_light_skin_tone": "1f44b-1f3fc", "wave_medium_skin_tone": "1f44b-1f3fd", "wave_medium_dark_skin_tone": "1f44b-1f3fe", "wave_dark_skin_tone": "1f44b-1f3ff", "ok_hand_light_skin_tone": "1f44c-1f3fb", "ok_hand_medium_light_skin_tone": "1f44c-1f3fc", "ok_hand_medium_skin_tone": "1f44c-1f3fd", "ok_hand_medium_dark_skin_tone": "1f44c-1f3fe", "ok_hand_dark_skin_tone": "1f44c-1f3ff", "+1_light_skin_tone": "1f44d-1f3fb", "thumbsup_light_skin_tone": "1f44d-1f3fb", "+1_medium_light_skin_tone": "1f44d-1f3fc", "thumbsup_medium_light_skin_tone": "1f44d-1f3fc", "+1_medium_skin_tone": "1f44d-1f3fd", "thumbsup_medium_skin_tone": "1f44d-1f3fd", "+1_medium_dark_skin_tone": "1f44d-1f3fe", "thumbsup_medium_dark_skin_tone": "1f44d-1f3fe", "+1_dark_skin_tone": "1f44d-1f3ff", "thumbsup_dark_skin_tone": "1f44d-1f3ff", "-1_light_skin_tone": "1f44e-1f3fb", "thumbsdown_light_skin_tone": "1f44e-1f3fb", "-1_medium_light_skin_tone": "1f44e-1f3fc", "thumbsdown_medium_light_skin_tone": "1f44e-1f3fc", "-1_medium_skin_tone": "1f44e-1f3fd", "thumbsdown_medium_skin_tone": "1f44e-1f3fd", "-1_medium_dark_skin_tone": "1f44e-1f3fe", "thumbsdown_medium_dark_skin_tone": "1f44e-1f3fe", "-1_dark_skin_tone": "1f44e-1f3ff", "thumbsdown_dark_skin_tone": "1f44e-1f3ff", "clap_light_skin_tone": "1f44f-1f3fb", "clap_medium_light_skin_tone": "1f44f-1f3fc", "clap_medium_skin_tone": "1f44f-1f3fd", "clap_medium_dark_skin_tone": "1f44f-1f3fe", "clap_dark_skin_tone": "1f44f-1f3ff", "open_hands_light_skin_tone": "1f450-1f3fb", "open_hands_medium_light_skin_tone": "1f450-1f3fc", "open_hands_medium_skin_tone": "1f450-1f3fd", "open_hands_medium_dark_skin_tone": "1f450-1f3fe", "open_hands_dark_skin_tone": "1f450-1f3ff", "boy_light_skin_tone": "1f466-1f3fb", "boy_medium_light_skin_tone": "1f466-1f3fc", "boy_medium_skin_tone": "1f466-1f3fd", "boy_medium_dark_skin_tone": "1f466-1f3fe", "boy_dark_skin_tone": "1f466-1f3ff", "girl_light_skin_tone": "1f467-1f3fb", "girl_medium_light_skin_tone": "1f467-1f3fc", "girl_medium_skin_tone": "1f467-1f3fd", "girl_medium_dark_skin_tone": "1f467-1f3fe", "girl_dark_skin_tone": "1f467-1f3ff", "male-farmer_light_skin_tone": "1f468-1f3fb-200d-1f33e", "man_farmer_light_skin_tone": "1f468-1f3fb-200d-1f33e", "male-farmer_medium_light_skin_tone": "1f468-1f3fc-200d-1f33e", "man_farmer_medium_light_skin_tone": 
"1f468-1f3fc-200d-1f33e", "male-farmer_medium_skin_tone": "1f468-1f3fd-200d-1f33e", "man_farmer_medium_skin_tone": "1f468-1f3fd-200d-1f33e", "male-farmer_medium_dark_skin_tone": "1f468-1f3fe-200d-1f33e", "man_farmer_medium_dark_skin_tone": "1f468-1f3fe-200d-1f33e", "male-farmer_dark_skin_tone": "1f468-1f3ff-200d-1f33e", "man_farmer_dark_skin_tone": "1f468-1f3ff-200d-1f33e", "male-cook_light_skin_tone": "1f468-1f3fb-200d-1f373", "man_cook_light_skin_tone": "1f468-1f3fb-200d-1f373", "male-cook_medium_light_skin_tone": "1f468-1f3fc-200d-1f373", "man_cook_medium_light_skin_tone": "1f468-1f3fc-200d-1f373", "male-cook_medium_skin_tone": "1f468-1f3fd-200d-1f373", "man_cook_medium_skin_tone": "1f468-1f3fd-200d-1f373", "male-cook_medium_dark_skin_tone": "1f468-1f3fe-200d-1f373", "man_cook_medium_dark_skin_tone": "1f468-1f3fe-200d-1f373", "male-cook_dark_skin_tone": "1f468-1f3ff-200d-1f373", "man_cook_dark_skin_tone": "1f468-1f3ff-200d-1f373", "man_feeding_baby_light_skin_tone": "1f468-1f3fb-200d-1f37c", "man_feeding_baby_medium_light_skin_tone": "1f468-1f3fc-200d-1f37c", "man_feeding_baby_medium_skin_tone": "1f468-1f3fd-200d-1f37c", "man_feeding_baby_medium_dark_skin_tone": "1f468-1f3fe-200d-1f37c", "man_feeding_baby_dark_skin_tone": "1f468-1f3ff-200d-1f37c", "male-student_light_skin_tone": "1f468-1f3fb-200d-1f393", "man_student_light_skin_tone": "1f468-1f3fb-200d-1f393", "male-student_medium_light_skin_tone": "1f468-1f3fc-200d-1f393", "man_student_medium_light_skin_tone": "1f468-1f3fc-200d-1f393", "male-student_medium_skin_tone": "1f468-1f3fd-200d-1f393", "man_student_medium_skin_tone": "1f468-1f3fd-200d-1f393", "male-student_medium_dark_skin_tone": "1f468-1f3fe-200d-1f393", "man_student_medium_dark_skin_tone": "1f468-1f3fe-200d-1f393", "male-student_dark_skin_tone": "1f468-1f3ff-200d-1f393", "man_student_dark_skin_tone": "1f468-1f3ff-200d-1f393", "male-singer_light_skin_tone": "1f468-1f3fb-200d-1f3a4", "man_singer_light_skin_tone": "1f468-1f3fb-200d-1f3a4", "male-singer_medium_light_skin_tone": "1f468-1f3fc-200d-1f3a4", "man_singer_medium_light_skin_tone": "1f468-1f3fc-200d-1f3a4", "male-singer_medium_skin_tone": "1f468-1f3fd-200d-1f3a4", "man_singer_medium_skin_tone": "1f468-1f3fd-200d-1f3a4", "male-singer_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3a4", "man_singer_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3a4", "male-singer_dark_skin_tone": "1f468-1f3ff-200d-1f3a4", "man_singer_dark_skin_tone": "1f468-1f3ff-200d-1f3a4", "male-artist_light_skin_tone": "1f468-1f3fb-200d-1f3a8", "man_artist_light_skin_tone": "1f468-1f3fb-200d-1f3a8", "male-artist_medium_light_skin_tone": "1f468-1f3fc-200d-1f3a8", "man_artist_medium_light_skin_tone": "1f468-1f3fc-200d-1f3a8", "male-artist_medium_skin_tone": "1f468-1f3fd-200d-1f3a8", "man_artist_medium_skin_tone": "1f468-1f3fd-200d-1f3a8", "male-artist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3a8", "man_artist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3a8", "male-artist_dark_skin_tone": "1f468-1f3ff-200d-1f3a8", "man_artist_dark_skin_tone": "1f468-1f3ff-200d-1f3a8", "male-teacher_light_skin_tone": "1f468-1f3fb-200d-1f3eb", "man_teacher_light_skin_tone": "1f468-1f3fb-200d-1f3eb", "male-teacher_medium_light_skin_tone": "1f468-1f3fc-200d-1f3eb", "man_teacher_medium_light_skin_tone": "1f468-1f3fc-200d-1f3eb", "male-teacher_medium_skin_tone": "1f468-1f3fd-200d-1f3eb", "man_teacher_medium_skin_tone": "1f468-1f3fd-200d-1f3eb", "male-teacher_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3eb", "man_teacher_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3eb", 
"male-teacher_dark_skin_tone": "1f468-1f3ff-200d-1f3eb", "man_teacher_dark_skin_tone": "1f468-1f3ff-200d-1f3eb", "male-factory-worker_light_skin_tone": "1f468-1f3fb-200d-1f3ed", "man_factory_worker_light_skin_tone": "1f468-1f3fb-200d-1f3ed", "male-factory-worker_medium_light_skin_tone": "1f468-1f3fc-200d-1f3ed", "man_factory_worker_medium_light_skin_tone": "1f468-1f3fc-200d-1f3ed", "male-factory-worker_medium_skin_tone": "1f468-1f3fd-200d-1f3ed", "man_factory_worker_medium_skin_tone": "1f468-1f3fd-200d-1f3ed", "male-factory-worker_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3ed", "man_factory_worker_medium_dark_skin_tone": "1f468-1f3fe-200d-1f3ed", "male-factory-worker_dark_skin_tone": "1f468-1f3ff-200d-1f3ed", "man_factory_worker_dark_skin_tone": "1f468-1f3ff-200d-1f3ed", "male-technologist_light_skin_tone": "1f468-1f3fb-200d-1f4bb", "man_technologist_light_skin_tone": "1f468-1f3fb-200d-1f4bb", "male-technologist_medium_light_skin_tone": "1f468-1f3fc-200d-1f4bb", "man_technologist_medium_light_skin_tone": "1f468-1f3fc-200d-1f4bb", "male-technologist_medium_skin_tone": "1f468-1f3fd-200d-1f4bb", "man_technologist_medium_skin_tone": "1f468-1f3fd-200d-1f4bb", "male-technologist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f4bb", "man_technologist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f4bb", "male-technologist_dark_skin_tone": "1f468-1f3ff-200d-1f4bb", "man_technologist_dark_skin_tone": "1f468-1f3ff-200d-1f4bb", "male-office-worker_light_skin_tone": "1f468-1f3fb-200d-1f4bc", "man_office_worker_light_skin_tone": "1f468-1f3fb-200d-1f4bc", "male-office-worker_medium_light_skin_tone": "1f468-1f3fc-200d-1f4bc", "man_office_worker_medium_light_skin_tone": "1f468-1f3fc-200d-1f4bc", "male-office-worker_medium_skin_tone": "1f468-1f3fd-200d-1f4bc", "man_office_worker_medium_skin_tone": "1f468-1f3fd-200d-1f4bc", "male-office-worker_medium_dark_skin_tone": "1f468-1f3fe-200d-1f4bc", "man_office_worker_medium_dark_skin_tone": "1f468-1f3fe-200d-1f4bc", "male-office-worker_dark_skin_tone": "1f468-1f3ff-200d-1f4bc", "man_office_worker_dark_skin_tone": "1f468-1f3ff-200d-1f4bc", "male-mechanic_light_skin_tone": "1f468-1f3fb-200d-1f527", "man_mechanic_light_skin_tone": "1f468-1f3fb-200d-1f527", "male-mechanic_medium_light_skin_tone": "1f468-1f3fc-200d-1f527", "man_mechanic_medium_light_skin_tone": "1f468-1f3fc-200d-1f527", "male-mechanic_medium_skin_tone": "1f468-1f3fd-200d-1f527", "man_mechanic_medium_skin_tone": "1f468-1f3fd-200d-1f527", "male-mechanic_medium_dark_skin_tone": "1f468-1f3fe-200d-1f527", "man_mechanic_medium_dark_skin_tone": "1f468-1f3fe-200d-1f527", "male-mechanic_dark_skin_tone": "1f468-1f3ff-200d-1f527", "man_mechanic_dark_skin_tone": "1f468-1f3ff-200d-1f527", "male-scientist_light_skin_tone": "1f468-1f3fb-200d-1f52c", "man_scientist_light_skin_tone": "1f468-1f3fb-200d-1f52c", "male-scientist_medium_light_skin_tone": "1f468-1f3fc-200d-1f52c", "man_scientist_medium_light_skin_tone": "1f468-1f3fc-200d-1f52c", "male-scientist_medium_skin_tone": "1f468-1f3fd-200d-1f52c", "man_scientist_medium_skin_tone": "1f468-1f3fd-200d-1f52c", "male-scientist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f52c", "man_scientist_medium_dark_skin_tone": "1f468-1f3fe-200d-1f52c", "male-scientist_dark_skin_tone": "1f468-1f3ff-200d-1f52c", "man_scientist_dark_skin_tone": "1f468-1f3ff-200d-1f52c", "male-astronaut_light_skin_tone": "1f468-1f3fb-200d-1f680", "man_astronaut_light_skin_tone": "1f468-1f3fb-200d-1f680", "male-astronaut_medium_light_skin_tone": "1f468-1f3fc-200d-1f680", "man_astronaut_medium_light_skin_tone": 
"1f468-1f3fc-200d-1f680", "male-astronaut_medium_skin_tone": "1f468-1f3fd-200d-1f680", "man_astronaut_medium_skin_tone": "1f468-1f3fd-200d-1f680", "male-astronaut_medium_dark_skin_tone": "1f468-1f3fe-200d-1f680", "man_astronaut_medium_dark_skin_tone": "1f468-1f3fe-200d-1f680", "male-astronaut_dark_skin_tone": "1f468-1f3ff-200d-1f680", "man_astronaut_dark_skin_tone": "1f468-1f3ff-200d-1f680", "male-firefighter_light_skin_tone": "1f468-1f3fb-200d-1f692", "man_firefighter_light_skin_tone": "1f468-1f3fb-200d-1f692", "male-firefighter_medium_light_skin_tone": "1f468-1f3fc-200d-1f692", "man_firefighter_medium_light_skin_tone": "1f468-1f3fc-200d-1f692", "male-firefighter_medium_skin_tone": "1f468-1f3fd-200d-1f692", "man_firefighter_medium_skin_tone": "1f468-1f3fd-200d-1f692", "male-firefighter_medium_dark_skin_tone": "1f468-1f3fe-200d-1f692", "man_firefighter_medium_dark_skin_tone": "1f468-1f3fe-200d-1f692", "male-firefighter_dark_skin_tone": "1f468-1f3ff-200d-1f692", "man_firefighter_dark_skin_tone": "1f468-1f3ff-200d-1f692", "man_with_probing_cane_light_skin_tone": "1f468-1f3fb-200d-1f9af", "man_with_probing_cane_medium_light_skin_tone": "1f468-1f3fc-200d-1f9af", "man_with_probing_cane_medium_skin_tone": "1f468-1f3fd-200d-1f9af", "man_with_probing_cane_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9af", "man_with_probing_cane_dark_skin_tone": "1f468-1f3ff-200d-1f9af", "red_haired_man_light_skin_tone": "1f468-1f3fb-200d-1f9b0", "red_haired_man_medium_light_skin_tone": "1f468-1f3fc-200d-1f9b0", "red_haired_man_medium_skin_tone": "1f468-1f3fd-200d-1f9b0", "red_haired_man_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9b0", "red_haired_man_dark_skin_tone": "1f468-1f3ff-200d-1f9b0", "curly_haired_man_light_skin_tone": "1f468-1f3fb-200d-1f9b1", "curly_haired_man_medium_light_skin_tone": "1f468-1f3fc-200d-1f9b1", "curly_haired_man_medium_skin_tone": "1f468-1f3fd-200d-1f9b1", "curly_haired_man_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9b1", "curly_haired_man_dark_skin_tone": "1f468-1f3ff-200d-1f9b1", "bald_man_light_skin_tone": "1f468-1f3fb-200d-1f9b2", "bald_man_medium_light_skin_tone": "1f468-1f3fc-200d-1f9b2", "bald_man_medium_skin_tone": "1f468-1f3fd-200d-1f9b2", "bald_man_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9b2", "bald_man_dark_skin_tone": "1f468-1f3ff-200d-1f9b2", "white_haired_man_light_skin_tone": "1f468-1f3fb-200d-1f9b3", "white_haired_man_medium_light_skin_tone": "1f468-1f3fc-200d-1f9b3", "white_haired_man_medium_skin_tone": "1f468-1f3fd-200d-1f9b3", "white_haired_man_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9b3", "white_haired_man_dark_skin_tone": "1f468-1f3ff-200d-1f9b3", "man_in_motorized_wheelchair_light_skin_tone": "1f468-1f3fb-200d-1f9bc", "man_in_motorized_wheelchair_medium_light_skin_tone": "1f468-1f3fc-200d-1f9bc", "man_in_motorized_wheelchair_medium_skin_tone": "1f468-1f3fd-200d-1f9bc", "man_in_motorized_wheelchair_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9bc", "man_in_motorized_wheelchair_dark_skin_tone": "1f468-1f3ff-200d-1f9bc", "man_in_manual_wheelchair_light_skin_tone": "1f468-1f3fb-200d-1f9bd", "man_in_manual_wheelchair_medium_light_skin_tone": "1f468-1f3fc-200d-1f9bd", "man_in_manual_wheelchair_medium_skin_tone": "1f468-1f3fd-200d-1f9bd", "man_in_manual_wheelchair_medium_dark_skin_tone": "1f468-1f3fe-200d-1f9bd", "man_in_manual_wheelchair_dark_skin_tone": "1f468-1f3ff-200d-1f9bd", "male-doctor_light_skin_tone": "1f468-1f3fb-200d-2695-fe0f", "man_health_worker_light_skin_tone": "1f468-1f3fb-200d-2695-fe0f", "male-doctor_medium_light_skin_tone": 
"1f468-1f3fc-200d-2695-fe0f", "man_health_worker_medium_light_skin_tone": "1f468-1f3fc-200d-2695-fe0f", "male-doctor_medium_skin_tone": "1f468-1f3fd-200d-2695-fe0f", "man_health_worker_medium_skin_tone": "1f468-1f3fd-200d-2695-fe0f", "male-doctor_medium_dark_skin_tone": "1f468-1f3fe-200d-2695-fe0f", "man_health_worker_medium_dark_skin_tone": "1f468-1f3fe-200d-2695-fe0f", "male-doctor_dark_skin_tone": "1f468-1f3ff-200d-2695-fe0f", "man_health_worker_dark_skin_tone": "1f468-1f3ff-200d-2695-fe0f", "male-judge_light_skin_tone": "1f468-1f3fb-200d-2696-fe0f", "man_judge_light_skin_tone": "1f468-1f3fb-200d-2696-fe0f", "male-judge_medium_light_skin_tone": "1f468-1f3fc-200d-2696-fe0f", "man_judge_medium_light_skin_tone": "1f468-1f3fc-200d-2696-fe0f", "male-judge_medium_skin_tone": "1f468-1f3fd-200d-2696-fe0f", "man_judge_medium_skin_tone": "1f468-1f3fd-200d-2696-fe0f", "male-judge_medium_dark_skin_tone": "1f468-1f3fe-200d-2696-fe0f", "man_judge_medium_dark_skin_tone": "1f468-1f3fe-200d-2696-fe0f", "male-judge_dark_skin_tone": "1f468-1f3ff-200d-2696-fe0f", "man_judge_dark_skin_tone": "1f468-1f3ff-200d-2696-fe0f", "male-pilot_light_skin_tone": "1f468-1f3fb-200d-2708-fe0f", "man_pilot_light_skin_tone": "1f468-1f3fb-200d-2708-fe0f", "male-pilot_medium_light_skin_tone": "1f468-1f3fc-200d-2708-fe0f", "man_pilot_medium_light_skin_tone": "1f468-1f3fc-200d-2708-fe0f", "male-pilot_medium_skin_tone": "1f468-1f3fd-200d-2708-fe0f", "man_pilot_medium_skin_tone": "1f468-1f3fd-200d-2708-fe0f", "male-pilot_medium_dark_skin_tone": "1f468-1f3fe-200d-2708-fe0f", "man_pilot_medium_dark_skin_tone": "1f468-1f3fe-200d-2708-fe0f", "male-pilot_dark_skin_tone": "1f468-1f3ff-200d-2708-fe0f", "man_pilot_dark_skin_tone": "1f468-1f3ff-200d-2708-fe0f", "man_light_skin_tone": "1f468-1f3fb", "man_medium_light_skin_tone": "1f468-1f3fc", "man_medium_skin_tone": "1f468-1f3fd", "man_medium_dark_skin_tone": "1f468-1f3fe", "man_dark_skin_tone": "1f468-1f3ff", "female-farmer_light_skin_tone": "1f469-1f3fb-200d-1f33e", "woman_farmer_light_skin_tone": "1f469-1f3fb-200d-1f33e", "female-farmer_medium_light_skin_tone": "1f469-1f3fc-200d-1f33e", "woman_farmer_medium_light_skin_tone": "1f469-1f3fc-200d-1f33e", "female-farmer_medium_skin_tone": "1f469-1f3fd-200d-1f33e", "woman_farmer_medium_skin_tone": "1f469-1f3fd-200d-1f33e", "female-farmer_medium_dark_skin_tone": "1f469-1f3fe-200d-1f33e", "woman_farmer_medium_dark_skin_tone": "1f469-1f3fe-200d-1f33e", "female-farmer_dark_skin_tone": "1f469-1f3ff-200d-1f33e", "woman_farmer_dark_skin_tone": "1f469-1f3ff-200d-1f33e", "female-cook_light_skin_tone": "1f469-1f3fb-200d-1f373", "woman_cook_light_skin_tone": "1f469-1f3fb-200d-1f373", "female-cook_medium_light_skin_tone": "1f469-1f3fc-200d-1f373", "woman_cook_medium_light_skin_tone": "1f469-1f3fc-200d-1f373", "female-cook_medium_skin_tone": "1f469-1f3fd-200d-1f373", "woman_cook_medium_skin_tone": "1f469-1f3fd-200d-1f373", "female-cook_medium_dark_skin_tone": "1f469-1f3fe-200d-1f373", "woman_cook_medium_dark_skin_tone": "1f469-1f3fe-200d-1f373", "female-cook_dark_skin_tone": "1f469-1f3ff-200d-1f373", "woman_cook_dark_skin_tone": "1f469-1f3ff-200d-1f373", "woman_feeding_baby_light_skin_tone": "1f469-1f3fb-200d-1f37c", "woman_feeding_baby_medium_light_skin_tone": "1f469-1f3fc-200d-1f37c", "woman_feeding_baby_medium_skin_tone": "1f469-1f3fd-200d-1f37c", "woman_feeding_baby_medium_dark_skin_tone": "1f469-1f3fe-200d-1f37c", "woman_feeding_baby_dark_skin_tone": "1f469-1f3ff-200d-1f37c", "female-student_light_skin_tone": "1f469-1f3fb-200d-1f393", 
"woman_student_light_skin_tone": "1f469-1f3fb-200d-1f393", "female-student_medium_light_skin_tone": "1f469-1f3fc-200d-1f393", "woman_student_medium_light_skin_tone": "1f469-1f3fc-200d-1f393", "female-student_medium_skin_tone": "1f469-1f3fd-200d-1f393", "woman_student_medium_skin_tone": "1f469-1f3fd-200d-1f393", "female-student_medium_dark_skin_tone": "1f469-1f3fe-200d-1f393", "woman_student_medium_dark_skin_tone": "1f469-1f3fe-200d-1f393", "female-student_dark_skin_tone": "1f469-1f3ff-200d-1f393", "woman_student_dark_skin_tone": "1f469-1f3ff-200d-1f393", "female-singer_light_skin_tone": "1f469-1f3fb-200d-1f3a4", "woman_singer_light_skin_tone": "1f469-1f3fb-200d-1f3a4", "female-singer_medium_light_skin_tone": "1f469-1f3fc-200d-1f3a4", "woman_singer_medium_light_skin_tone": "1f469-1f3fc-200d-1f3a4", "female-singer_medium_skin_tone": "1f469-1f3fd-200d-1f3a4", "woman_singer_medium_skin_tone": "1f469-1f3fd-200d-1f3a4", "female-singer_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3a4", "woman_singer_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3a4", "female-singer_dark_skin_tone": "1f469-1f3ff-200d-1f3a4", "woman_singer_dark_skin_tone": "1f469-1f3ff-200d-1f3a4", "female-artist_light_skin_tone": "1f469-1f3fb-200d-1f3a8", "woman_artist_light_skin_tone": "1f469-1f3fb-200d-1f3a8", "female-artist_medium_light_skin_tone": "1f469-1f3fc-200d-1f3a8", "woman_artist_medium_light_skin_tone": "1f469-1f3fc-200d-1f3a8", "female-artist_medium_skin_tone": "1f469-1f3fd-200d-1f3a8", "woman_artist_medium_skin_tone": "1f469-1f3fd-200d-1f3a8", "female-artist_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3a8", "woman_artist_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3a8", "female-artist_dark_skin_tone": "1f469-1f3ff-200d-1f3a8", "woman_artist_dark_skin_tone": "1f469-1f3ff-200d-1f3a8", "female-teacher_light_skin_tone": "1f469-1f3fb-200d-1f3eb", "woman_teacher_light_skin_tone": "1f469-1f3fb-200d-1f3eb", "female-teacher_medium_light_skin_tone": "1f469-1f3fc-200d-1f3eb", "woman_teacher_medium_light_skin_tone": "1f469-1f3fc-200d-1f3eb", "female-teacher_medium_skin_tone": "1f469-1f3fd-200d-1f3eb", "woman_teacher_medium_skin_tone": "1f469-1f3fd-200d-1f3eb", "female-teacher_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3eb", "woman_teacher_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3eb", "female-teacher_dark_skin_tone": "1f469-1f3ff-200d-1f3eb", "woman_teacher_dark_skin_tone": "1f469-1f3ff-200d-1f3eb", "female-factory-worker_light_skin_tone": "1f469-1f3fb-200d-1f3ed", "woman_factory_worker_light_skin_tone": "1f469-1f3fb-200d-1f3ed", "female-factory-worker_medium_light_skin_tone": "1f469-1f3fc-200d-1f3ed", "woman_factory_worker_medium_light_skin_tone": "1f469-1f3fc-200d-1f3ed", "female-factory-worker_medium_skin_tone": "1f469-1f3fd-200d-1f3ed", "woman_factory_worker_medium_skin_tone": "1f469-1f3fd-200d-1f3ed", "female-factory-worker_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3ed", "woman_factory_worker_medium_dark_skin_tone": "1f469-1f3fe-200d-1f3ed", "female-factory-worker_dark_skin_tone": "1f469-1f3ff-200d-1f3ed", "woman_factory_worker_dark_skin_tone": "1f469-1f3ff-200d-1f3ed", "female-technologist_light_skin_tone": "1f469-1f3fb-200d-1f4bb", "woman_technologist_light_skin_tone": "1f469-1f3fb-200d-1f4bb", "female-technologist_medium_light_skin_tone": "1f469-1f3fc-200d-1f4bb", "woman_technologist_medium_light_skin_tone": "1f469-1f3fc-200d-1f4bb", "female-technologist_medium_skin_tone": "1f469-1f3fd-200d-1f4bb", "woman_technologist_medium_skin_tone": "1f469-1f3fd-200d-1f4bb", "female-technologist_medium_dark_skin_tone": 
"1f469-1f3fe-200d-1f4bb", "woman_technologist_medium_dark_skin_tone": "1f469-1f3fe-200d-1f4bb", "female-technologist_dark_skin_tone": "1f469-1f3ff-200d-1f4bb", "woman_technologist_dark_skin_tone": "1f469-1f3ff-200d-1f4bb", "female-office-worker_light_skin_tone": "1f469-1f3fb-200d-1f4bc", "woman_office_worker_light_skin_tone": "1f469-1f3fb-200d-1f4bc", "female-office-worker_medium_light_skin_tone": "1f469-1f3fc-200d-1f4bc", "woman_office_worker_medium_light_skin_tone": "1f469-1f3fc-200d-1f4bc", "female-office-worker_medium_skin_tone": "1f469-1f3fd-200d-1f4bc", "woman_office_worker_medium_skin_tone": "1f469-1f3fd-200d-1f4bc", "female-office-worker_medium_dark_skin_tone": "1f469-1f3fe-200d-1f4bc", "woman_office_worker_medium_dark_skin_tone": "1f469-1f3fe-200d-1f4bc", "female-office-worker_dark_skin_tone": "1f469-1f3ff-200d-1f4bc", "woman_office_worker_dark_skin_tone": "1f469-1f3ff-200d-1f4bc", "female-mechanic_light_skin_tone": "1f469-1f3fb-200d-1f527", "woman_mechanic_light_skin_tone": "1f469-1f3fb-200d-1f527", "female-mechanic_medium_light_skin_tone": "1f469-1f3fc-200d-1f527", "woman_mechanic_medium_light_skin_tone": "1f469-1f3fc-200d-1f527", "female-mechanic_medium_skin_tone": "1f469-1f3fd-200d-1f527", "woman_mechanic_medium_skin_tone": "1f469-1f3fd-200d-1f527", "female-mechanic_medium_dark_skin_tone": "1f469-1f3fe-200d-1f527", "woman_mechanic_medium_dark_skin_tone": "1f469-1f3fe-200d-1f527", "female-mechanic_dark_skin_tone": "1f469-1f3ff-200d-1f527", "woman_mechanic_dark_skin_tone": "1f469-1f3ff-200d-1f527", "female-scientist_light_skin_tone": "1f469-1f3fb-200d-1f52c", "woman_scientist_light_skin_tone": "1f469-1f3fb-200d-1f52c", "female-scientist_medium_light_skin_tone": "1f469-1f3fc-200d-1f52c", "woman_scientist_medium_light_skin_tone": "1f469-1f3fc-200d-1f52c", "female-scientist_medium_skin_tone": "1f469-1f3fd-200d-1f52c", "woman_scientist_medium_skin_tone": "1f469-1f3fd-200d-1f52c", "female-scientist_medium_dark_skin_tone": "1f469-1f3fe-200d-1f52c", "woman_scientist_medium_dark_skin_tone": "1f469-1f3fe-200d-1f52c", "female-scientist_dark_skin_tone": "1f469-1f3ff-200d-1f52c", "woman_scientist_dark_skin_tone": "1f469-1f3ff-200d-1f52c", "female-astronaut_light_skin_tone": "1f469-1f3fb-200d-1f680", "woman_astronaut_light_skin_tone": "1f469-1f3fb-200d-1f680", "female-astronaut_medium_light_skin_tone": "1f469-1f3fc-200d-1f680", "woman_astronaut_medium_light_skin_tone": "1f469-1f3fc-200d-1f680", "female-astronaut_medium_skin_tone": "1f469-1f3fd-200d-1f680", "woman_astronaut_medium_skin_tone": "1f469-1f3fd-200d-1f680", "female-astronaut_medium_dark_skin_tone": "1f469-1f3fe-200d-1f680", "woman_astronaut_medium_dark_skin_tone": "1f469-1f3fe-200d-1f680", "female-astronaut_dark_skin_tone": "1f469-1f3ff-200d-1f680", "woman_astronaut_dark_skin_tone": "1f469-1f3ff-200d-1f680", "female-firefighter_light_skin_tone": "1f469-1f3fb-200d-1f692", "woman_firefighter_light_skin_tone": "1f469-1f3fb-200d-1f692", "female-firefighter_medium_light_skin_tone": "1f469-1f3fc-200d-1f692", "woman_firefighter_medium_light_skin_tone": "1f469-1f3fc-200d-1f692", "female-firefighter_medium_skin_tone": "1f469-1f3fd-200d-1f692", "woman_firefighter_medium_skin_tone": "1f469-1f3fd-200d-1f692", "female-firefighter_medium_dark_skin_tone": "1f469-1f3fe-200d-1f692", "woman_firefighter_medium_dark_skin_tone": "1f469-1f3fe-200d-1f692", "female-firefighter_dark_skin_tone": "1f469-1f3ff-200d-1f692", "woman_firefighter_dark_skin_tone": "1f469-1f3ff-200d-1f692", "woman_with_probing_cane_light_skin_tone": "1f469-1f3fb-200d-1f9af", 
"woman_with_probing_cane_medium_light_skin_tone": "1f469-1f3fc-200d-1f9af", "woman_with_probing_cane_medium_skin_tone": "1f469-1f3fd-200d-1f9af", "woman_with_probing_cane_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9af", "woman_with_probing_cane_dark_skin_tone": "1f469-1f3ff-200d-1f9af", "red_haired_woman_light_skin_tone": "1f469-1f3fb-200d-1f9b0", "red_haired_woman_medium_light_skin_tone": "1f469-1f3fc-200d-1f9b0", "red_haired_woman_medium_skin_tone": "1f469-1f3fd-200d-1f9b0", "red_haired_woman_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9b0", "red_haired_woman_dark_skin_tone": "1f469-1f3ff-200d-1f9b0", "curly_haired_woman_light_skin_tone": "1f469-1f3fb-200d-1f9b1", "curly_haired_woman_medium_light_skin_tone": "1f469-1f3fc-200d-1f9b1", "curly_haired_woman_medium_skin_tone": "1f469-1f3fd-200d-1f9b1", "curly_haired_woman_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9b1", "curly_haired_woman_dark_skin_tone": "1f469-1f3ff-200d-1f9b1", "bald_woman_light_skin_tone": "1f469-1f3fb-200d-1f9b2", "bald_woman_medium_light_skin_tone": "1f469-1f3fc-200d-1f9b2", "bald_woman_medium_skin_tone": "1f469-1f3fd-200d-1f9b2", "bald_woman_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9b2", "bald_woman_dark_skin_tone": "1f469-1f3ff-200d-1f9b2", "white_haired_woman_light_skin_tone": "1f469-1f3fb-200d-1f9b3", "white_haired_woman_medium_light_skin_tone": "1f469-1f3fc-200d-1f9b3", "white_haired_woman_medium_skin_tone": "1f469-1f3fd-200d-1f9b3", "white_haired_woman_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9b3", "white_haired_woman_dark_skin_tone": "1f469-1f3ff-200d-1f9b3", "woman_in_motorized_wheelchair_light_skin_tone": "1f469-1f3fb-200d-1f9bc", "woman_in_motorized_wheelchair_medium_light_skin_tone": "1f469-1f3fc-200d-1f9bc", "woman_in_motorized_wheelchair_medium_skin_tone": "1f469-1f3fd-200d-1f9bc", "woman_in_motorized_wheelchair_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9bc", "woman_in_motorized_wheelchair_dark_skin_tone": "1f469-1f3ff-200d-1f9bc", "woman_in_manual_wheelchair_light_skin_tone": "1f469-1f3fb-200d-1f9bd", "woman_in_manual_wheelchair_medium_light_skin_tone": "1f469-1f3fc-200d-1f9bd", "woman_in_manual_wheelchair_medium_skin_tone": "1f469-1f3fd-200d-1f9bd", "woman_in_manual_wheelchair_medium_dark_skin_tone": "1f469-1f3fe-200d-1f9bd", "woman_in_manual_wheelchair_dark_skin_tone": "1f469-1f3ff-200d-1f9bd", "female-doctor_light_skin_tone": "1f469-1f3fb-200d-2695-fe0f", "woman_health_worker_light_skin_tone": "1f469-1f3fb-200d-2695-fe0f", "female-doctor_medium_light_skin_tone": "1f469-1f3fc-200d-2695-fe0f", "woman_health_worker_medium_light_skin_tone": "1f469-1f3fc-200d-2695-fe0f", "female-doctor_medium_skin_tone": "1f469-1f3fd-200d-2695-fe0f", "woman_health_worker_medium_skin_tone": "1f469-1f3fd-200d-2695-fe0f", "female-doctor_medium_dark_skin_tone": "1f469-1f3fe-200d-2695-fe0f", "woman_health_worker_medium_dark_skin_tone": "1f469-1f3fe-200d-2695-fe0f", "female-doctor_dark_skin_tone": "1f469-1f3ff-200d-2695-fe0f", "woman_health_worker_dark_skin_tone": "1f469-1f3ff-200d-2695-fe0f", "female-judge_light_skin_tone": "1f469-1f3fb-200d-2696-fe0f", "woman_judge_light_skin_tone": "1f469-1f3fb-200d-2696-fe0f", "female-judge_medium_light_skin_tone": "1f469-1f3fc-200d-2696-fe0f", "woman_judge_medium_light_skin_tone": "1f469-1f3fc-200d-2696-fe0f", "female-judge_medium_skin_tone": "1f469-1f3fd-200d-2696-fe0f", "woman_judge_medium_skin_tone": "1f469-1f3fd-200d-2696-fe0f", "female-judge_medium_dark_skin_tone": "1f469-1f3fe-200d-2696-fe0f", "woman_judge_medium_dark_skin_tone": "1f469-1f3fe-200d-2696-fe0f", 
"female-judge_dark_skin_tone": "1f469-1f3ff-200d-2696-fe0f", "woman_judge_dark_skin_tone": "1f469-1f3ff-200d-2696-fe0f", "female-pilot_light_skin_tone": "1f469-1f3fb-200d-2708-fe0f", "woman_pilot_light_skin_tone": "1f469-1f3fb-200d-2708-fe0f", "female-pilot_medium_light_skin_tone": "1f469-1f3fc-200d-2708-fe0f", "woman_pilot_medium_light_skin_tone": "1f469-1f3fc-200d-2708-fe0f", "female-pilot_medium_skin_tone": "1f469-1f3fd-200d-2708-fe0f", "woman_pilot_medium_skin_tone": "1f469-1f3fd-200d-2708-fe0f", "female-pilot_medium_dark_skin_tone": "1f469-1f3fe-200d-2708-fe0f", "woman_pilot_medium_dark_skin_tone": "1f469-1f3fe-200d-2708-fe0f", "female-pilot_dark_skin_tone": "1f469-1f3ff-200d-2708-fe0f", "woman_pilot_dark_skin_tone": "1f469-1f3ff-200d-2708-fe0f", "woman_light_skin_tone": "1f469-1f3fb", "woman_medium_light_skin_tone": "1f469-1f3fc", "woman_medium_skin_tone": "1f469-1f3fd", "woman_medium_dark_skin_tone": "1f469-1f3fe", "woman_dark_skin_tone": "1f469-1f3ff", "man_and_woman_holding_hands_light_skin_tone": "1f46b-1f3fb", "woman_and_man_holding_hands_light_skin_tone": "1f46b-1f3fb", "couple_light_skin_tone": "1f46b-1f3fb", "man_and_woman_holding_hands_medium_light_skin_tone": "1f46b-1f3fc", "woman_and_man_holding_hands_medium_light_skin_tone": "1f46b-1f3fc", "couple_medium_light_skin_tone": "1f46b-1f3fc", "man_and_woman_holding_hands_medium_skin_tone": "1f46b-1f3fd", "woman_and_man_holding_hands_medium_skin_tone": "1f46b-1f3fd", "couple_medium_skin_tone": "1f46b-1f3fd", "man_and_woman_holding_hands_medium_dark_skin_tone": "1f46b-1f3fe", "woman_and_man_holding_hands_medium_dark_skin_tone": "1f46b-1f3fe", "couple_medium_dark_skin_tone": "1f46b-1f3fe", "man_and_woman_holding_hands_dark_skin_tone": "1f46b-1f3ff", "woman_and_man_holding_hands_dark_skin_tone": "1f46b-1f3ff", "couple_dark_skin_tone": "1f46b-1f3ff", "man_and_woman_holding_hands_light_skin_tone_medium_light_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fc", "woman_and_man_holding_hands_light_skin_tone_medium_light_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fc", "couple_light_skin_tone_medium_light_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fc", "man_and_woman_holding_hands_light_skin_tone_medium_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fd", "woman_and_man_holding_hands_light_skin_tone_medium_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fd", "couple_light_skin_tone_medium_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fd", "man_and_woman_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fe", "woman_and_man_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fe", "couple_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3fe", "man_and_woman_holding_hands_light_skin_tone_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3ff", "woman_and_man_holding_hands_light_skin_tone_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3ff", "couple_light_skin_tone_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f468-1f3ff", "man_and_woman_holding_hands_medium_light_skin_tone_light_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fb", "woman_and_man_holding_hands_medium_light_skin_tone_light_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fb", "couple_medium_light_skin_tone_light_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fb", "man_and_woman_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fd", 
"woman_and_man_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fd", "couple_medium_light_skin_tone_medium_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fd", "man_and_woman_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fe", "woman_and_man_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fe", "couple_medium_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3fe", "man_and_woman_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3ff", "woman_and_man_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3ff", "couple_medium_light_skin_tone_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f468-1f3ff", "man_and_woman_holding_hands_medium_skin_tone_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fb", "woman_and_man_holding_hands_medium_skin_tone_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fb", "couple_medium_skin_tone_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fb", "man_and_woman_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fc", "woman_and_man_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fc", "couple_medium_skin_tone_medium_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fc", "man_and_woman_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fe", "woman_and_man_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fe", "couple_medium_skin_tone_medium_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3fe", "man_and_woman_holding_hands_medium_skin_tone_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3ff", "woman_and_man_holding_hands_medium_skin_tone_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3ff", "couple_medium_skin_tone_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f468-1f3ff", "man_and_woman_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fb", "woman_and_man_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fb", "couple_medium_dark_skin_tone_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fb", "man_and_woman_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fc", "woman_and_man_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fc", "couple_medium_dark_skin_tone_medium_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fc", "man_and_woman_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fd", "woman_and_man_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fd", "couple_medium_dark_skin_tone_medium_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3fd", "man_and_woman_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3ff", "woman_and_man_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3ff", "couple_medium_dark_skin_tone_dark_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f468-1f3ff", "man_and_woman_holding_hands_dark_skin_tone_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fb", "woman_and_man_holding_hands_dark_skin_tone_light_skin_tone": 
"1f469-1f3ff-200d-1f91d-200d-1f468-1f3fb", "couple_dark_skin_tone_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fb", "man_and_woman_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fc", "woman_and_man_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fc", "couple_dark_skin_tone_medium_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fc", "man_and_woman_holding_hands_dark_skin_tone_medium_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fd", "woman_and_man_holding_hands_dark_skin_tone_medium_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fd", "couple_dark_skin_tone_medium_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fd", "man_and_woman_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fe", "woman_and_man_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fe", "couple_dark_skin_tone_medium_dark_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f468-1f3fe", "two_men_holding_hands_light_skin_tone": "1f46c-1f3fb", "men_holding_hands_light_skin_tone": "1f46c-1f3fb", "two_men_holding_hands_medium_light_skin_tone": "1f46c-1f3fc", "men_holding_hands_medium_light_skin_tone": "1f46c-1f3fc", "two_men_holding_hands_medium_skin_tone": "1f46c-1f3fd", "men_holding_hands_medium_skin_tone": "1f46c-1f3fd", "two_men_holding_hands_medium_dark_skin_tone": "1f46c-1f3fe", "men_holding_hands_medium_dark_skin_tone": "1f46c-1f3fe", "two_men_holding_hands_dark_skin_tone": "1f46c-1f3ff", "men_holding_hands_dark_skin_tone": "1f46c-1f3ff", "two_men_holding_hands_light_skin_tone_medium_light_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fc", "men_holding_hands_light_skin_tone_medium_light_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fc", "two_men_holding_hands_light_skin_tone_medium_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fd", "men_holding_hands_light_skin_tone_medium_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fd", "two_men_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fe", "men_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3fe", "two_men_holding_hands_light_skin_tone_dark_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3ff", "men_holding_hands_light_skin_tone_dark_skin_tone": "1f468-1f3fb-200d-1f91d-200d-1f468-1f3ff", "two_men_holding_hands_medium_light_skin_tone_light_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fb", "men_holding_hands_medium_light_skin_tone_light_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fb", "two_men_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fd", "men_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fd", "two_men_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fe", "men_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3fe", "two_men_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3ff", "men_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f468-1f3fc-200d-1f91d-200d-1f468-1f3ff", "two_men_holding_hands_medium_skin_tone_light_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3fb", "men_holding_hands_medium_skin_tone_light_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3fb", "two_men_holding_hands_medium_skin_tone_medium_light_skin_tone": 
"1f468-1f3fd-200d-1f91d-200d-1f468-1f3fc", "men_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3fc", "two_men_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3fe", "men_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3fe", "two_men_holding_hands_medium_skin_tone_dark_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3ff", "men_holding_hands_medium_skin_tone_dark_skin_tone": "1f468-1f3fd-200d-1f91d-200d-1f468-1f3ff", "two_men_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fb", "men_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fb", "two_men_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fc", "men_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fc", "two_men_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fd", "men_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3fd", "two_men_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3ff", "men_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f468-1f3fe-200d-1f91d-200d-1f468-1f3ff", "two_men_holding_hands_dark_skin_tone_light_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fb", "men_holding_hands_dark_skin_tone_light_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fb", "two_men_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fc", "men_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fc", "two_men_holding_hands_dark_skin_tone_medium_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fd", "men_holding_hands_dark_skin_tone_medium_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fd", "two_men_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fe", "men_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f468-1f3ff-200d-1f91d-200d-1f468-1f3fe", "two_women_holding_hands_light_skin_tone": "1f46d-1f3fb", "women_holding_hands_light_skin_tone": "1f46d-1f3fb", "two_women_holding_hands_medium_light_skin_tone": "1f46d-1f3fc", "women_holding_hands_medium_light_skin_tone": "1f46d-1f3fc", "two_women_holding_hands_medium_skin_tone": "1f46d-1f3fd", "women_holding_hands_medium_skin_tone": "1f46d-1f3fd", "two_women_holding_hands_medium_dark_skin_tone": "1f46d-1f3fe", "women_holding_hands_medium_dark_skin_tone": "1f46d-1f3fe", "two_women_holding_hands_dark_skin_tone": "1f46d-1f3ff", "women_holding_hands_dark_skin_tone": "1f46d-1f3ff", "two_women_holding_hands_light_skin_tone_medium_light_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fc", "women_holding_hands_light_skin_tone_medium_light_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fc", "two_women_holding_hands_light_skin_tone_medium_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fd", "women_holding_hands_light_skin_tone_medium_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fd", "two_women_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fe", "women_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3fe", "two_women_holding_hands_light_skin_tone_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3ff", 
"women_holding_hands_light_skin_tone_dark_skin_tone": "1f469-1f3fb-200d-1f91d-200d-1f469-1f3ff", "two_women_holding_hands_medium_light_skin_tone_light_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fb", "women_holding_hands_medium_light_skin_tone_light_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fb", "two_women_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fd", "women_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fd", "two_women_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fe", "women_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3fe", "two_women_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3ff", "women_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f469-1f3fc-200d-1f91d-200d-1f469-1f3ff", "two_women_holding_hands_medium_skin_tone_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fb", "women_holding_hands_medium_skin_tone_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fb", "two_women_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fc", "women_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fc", "two_women_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fe", "women_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3fe", "two_women_holding_hands_medium_skin_tone_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3ff", "women_holding_hands_medium_skin_tone_dark_skin_tone": "1f469-1f3fd-200d-1f91d-200d-1f469-1f3ff", "two_women_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fb", "women_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fb", "two_women_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fc", "women_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fc", "two_women_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fd", "women_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3fd", "two_women_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3ff", "women_holding_hands_medium_dark_skin_tone_dark_skin_tone": "1f469-1f3fe-200d-1f91d-200d-1f469-1f3ff", "two_women_holding_hands_dark_skin_tone_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fb", "women_holding_hands_dark_skin_tone_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fb", "two_women_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fc", "women_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fc", "two_women_holding_hands_dark_skin_tone_medium_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fd", "women_holding_hands_dark_skin_tone_medium_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fd", "two_women_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fe", "women_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f469-1f3ff-200d-1f91d-200d-1f469-1f3fe", "female-police-officer_light_skin_tone": "1f46e-1f3fb-200d-2640-fe0f", 
"policewoman_light_skin_tone": "1f46e-1f3fb-200d-2640-fe0f", "female-police-officer_medium_light_skin_tone": "1f46e-1f3fc-200d-2640-fe0f", "policewoman_medium_light_skin_tone": "1f46e-1f3fc-200d-2640-fe0f", "female-police-officer_medium_skin_tone": "1f46e-1f3fd-200d-2640-fe0f", "policewoman_medium_skin_tone": "1f46e-1f3fd-200d-2640-fe0f", "female-police-officer_medium_dark_skin_tone": "1f46e-1f3fe-200d-2640-fe0f", "policewoman_medium_dark_skin_tone": "1f46e-1f3fe-200d-2640-fe0f", "female-police-officer_dark_skin_tone": "1f46e-1f3ff-200d-2640-fe0f", "policewoman_dark_skin_tone": "1f46e-1f3ff-200d-2640-fe0f", "male-police-officer_light_skin_tone": "1f46e-1f3fb-200d-2642-fe0f", "policeman_light_skin_tone": "1f46e-1f3fb-200d-2642-fe0f", "male-police-officer_medium_light_skin_tone": "1f46e-1f3fc-200d-2642-fe0f", "policeman_medium_light_skin_tone": "1f46e-1f3fc-200d-2642-fe0f", "male-police-officer_medium_skin_tone": "1f46e-1f3fd-200d-2642-fe0f", "policeman_medium_skin_tone": "1f46e-1f3fd-200d-2642-fe0f", "male-police-officer_medium_dark_skin_tone": "1f46e-1f3fe-200d-2642-fe0f", "policeman_medium_dark_skin_tone": "1f46e-1f3fe-200d-2642-fe0f", "male-police-officer_dark_skin_tone": "1f46e-1f3ff-200d-2642-fe0f", "policeman_dark_skin_tone": "1f46e-1f3ff-200d-2642-fe0f", "cop_light_skin_tone": "1f46e-1f3fb", "cop_medium_light_skin_tone": "1f46e-1f3fc", "cop_medium_skin_tone": "1f46e-1f3fd", "cop_medium_dark_skin_tone": "1f46e-1f3fe", "cop_dark_skin_tone": "1f46e-1f3ff", "woman_with_veil_light_skin_tone": "1f470-1f3fb-200d-2640-fe0f", "woman_with_veil_medium_light_skin_tone": "1f470-1f3fc-200d-2640-fe0f", "woman_with_veil_medium_skin_tone": "1f470-1f3fd-200d-2640-fe0f", "woman_with_veil_medium_dark_skin_tone": "1f470-1f3fe-200d-2640-fe0f", "woman_with_veil_dark_skin_tone": "1f470-1f3ff-200d-2640-fe0f", "man_with_veil_light_skin_tone": "1f470-1f3fb-200d-2642-fe0f", "man_with_veil_medium_light_skin_tone": "1f470-1f3fc-200d-2642-fe0f", "man_with_veil_medium_skin_tone": "1f470-1f3fd-200d-2642-fe0f", "man_with_veil_medium_dark_skin_tone": "1f470-1f3fe-200d-2642-fe0f", "man_with_veil_dark_skin_tone": "1f470-1f3ff-200d-2642-fe0f", "bride_with_veil_light_skin_tone": "1f470-1f3fb", "bride_with_veil_medium_light_skin_tone": "1f470-1f3fc", "bride_with_veil_medium_skin_tone": "1f470-1f3fd", "bride_with_veil_medium_dark_skin_tone": "1f470-1f3fe", "bride_with_veil_dark_skin_tone": "1f470-1f3ff", "blond-haired-woman_light_skin_tone": "1f471-1f3fb-200d-2640-fe0f", "blonde_woman_light_skin_tone": "1f471-1f3fb-200d-2640-fe0f", "blond-haired-woman_medium_light_skin_tone": "1f471-1f3fc-200d-2640-fe0f", "blonde_woman_medium_light_skin_tone": "1f471-1f3fc-200d-2640-fe0f", "blond-haired-woman_medium_skin_tone": "1f471-1f3fd-200d-2640-fe0f", "blonde_woman_medium_skin_tone": "1f471-1f3fd-200d-2640-fe0f", "blond-haired-woman_medium_dark_skin_tone": "1f471-1f3fe-200d-2640-fe0f", "blonde_woman_medium_dark_skin_tone": "1f471-1f3fe-200d-2640-fe0f", "blond-haired-woman_dark_skin_tone": "1f471-1f3ff-200d-2640-fe0f", "blonde_woman_dark_skin_tone": "1f471-1f3ff-200d-2640-fe0f", "blond-haired-man_light_skin_tone": "1f471-1f3fb-200d-2642-fe0f", "blonde_man_light_skin_tone": "1f471-1f3fb-200d-2642-fe0f", "blond-haired-man_medium_light_skin_tone": "1f471-1f3fc-200d-2642-fe0f", "blonde_man_medium_light_skin_tone": "1f471-1f3fc-200d-2642-fe0f", "blond-haired-man_medium_skin_tone": "1f471-1f3fd-200d-2642-fe0f", "blonde_man_medium_skin_tone": "1f471-1f3fd-200d-2642-fe0f", "blond-haired-man_medium_dark_skin_tone": "1f471-1f3fe-200d-2642-fe0f", 
"blonde_man_medium_dark_skin_tone": "1f471-1f3fe-200d-2642-fe0f", "blond-haired-man_dark_skin_tone": "1f471-1f3ff-200d-2642-fe0f", "blonde_man_dark_skin_tone": "1f471-1f3ff-200d-2642-fe0f", "person_with_blond_hair_light_skin_tone": "1f471-1f3fb", "person_with_blond_hair_medium_light_skin_tone": "1f471-1f3fc", "person_with_blond_hair_medium_skin_tone": "1f471-1f3fd", "person_with_blond_hair_medium_dark_skin_tone": "1f471-1f3fe", "person_with_blond_hair_dark_skin_tone": "1f471-1f3ff", "man_with_gua_pi_mao_light_skin_tone": "1f472-1f3fb", "man_with_gua_pi_mao_medium_light_skin_tone": "1f472-1f3fc", "man_with_gua_pi_mao_medium_skin_tone": "1f472-1f3fd", "man_with_gua_pi_mao_medium_dark_skin_tone": "1f472-1f3fe", "man_with_gua_pi_mao_dark_skin_tone": "1f472-1f3ff", "woman-wearing-turban_light_skin_tone": "1f473-1f3fb-200d-2640-fe0f", "woman_with_turban_light_skin_tone": "1f473-1f3fb-200d-2640-fe0f", "woman-wearing-turban_medium_light_skin_tone": "1f473-1f3fc-200d-2640-fe0f", "woman_with_turban_medium_light_skin_tone": "1f473-1f3fc-200d-2640-fe0f", "woman-wearing-turban_medium_skin_tone": "1f473-1f3fd-200d-2640-fe0f", "woman_with_turban_medium_skin_tone": "1f473-1f3fd-200d-2640-fe0f", "woman-wearing-turban_medium_dark_skin_tone": "1f473-1f3fe-200d-2640-fe0f", "woman_with_turban_medium_dark_skin_tone": "1f473-1f3fe-200d-2640-fe0f", "woman-wearing-turban_dark_skin_tone": "1f473-1f3ff-200d-2640-fe0f", "woman_with_turban_dark_skin_tone": "1f473-1f3ff-200d-2640-fe0f", "man-wearing-turban_light_skin_tone": "1f473-1f3fb-200d-2642-fe0f", "man-wearing-turban_medium_light_skin_tone": "1f473-1f3fc-200d-2642-fe0f", "man-wearing-turban_medium_skin_tone": "1f473-1f3fd-200d-2642-fe0f", "man-wearing-turban_medium_dark_skin_tone": "1f473-1f3fe-200d-2642-fe0f", "man-wearing-turban_dark_skin_tone": "1f473-1f3ff-200d-2642-fe0f", "man_with_turban_light_skin_tone": "1f473-1f3fb", "man_with_turban_medium_light_skin_tone": "1f473-1f3fc", "man_with_turban_medium_skin_tone": "1f473-1f3fd", "man_with_turban_medium_dark_skin_tone": "1f473-1f3fe", "man_with_turban_dark_skin_tone": "1f473-1f3ff", "older_man_light_skin_tone": "1f474-1f3fb", "older_man_medium_light_skin_tone": "1f474-1f3fc", "older_man_medium_skin_tone": "1f474-1f3fd", "older_man_medium_dark_skin_tone": "1f474-1f3fe", "older_man_dark_skin_tone": "1f474-1f3ff", "older_woman_light_skin_tone": "1f475-1f3fb", "older_woman_medium_light_skin_tone": "1f475-1f3fc", "older_woman_medium_skin_tone": "1f475-1f3fd", "older_woman_medium_dark_skin_tone": "1f475-1f3fe", "older_woman_dark_skin_tone": "1f475-1f3ff", "baby_light_skin_tone": "1f476-1f3fb", "baby_medium_light_skin_tone": "1f476-1f3fc", "baby_medium_skin_tone": "1f476-1f3fd", "baby_medium_dark_skin_tone": "1f476-1f3fe", "baby_dark_skin_tone": "1f476-1f3ff", "female-construction-worker_light_skin_tone": "1f477-1f3fb-200d-2640-fe0f", "construction_worker_woman_light_skin_tone": "1f477-1f3fb-200d-2640-fe0f", "female-construction-worker_medium_light_skin_tone": "1f477-1f3fc-200d-2640-fe0f", "construction_worker_woman_medium_light_skin_tone": "1f477-1f3fc-200d-2640-fe0f", "female-construction-worker_medium_skin_tone": "1f477-1f3fd-200d-2640-fe0f", "construction_worker_woman_medium_skin_tone": "1f477-1f3fd-200d-2640-fe0f", "female-construction-worker_medium_dark_skin_tone": "1f477-1f3fe-200d-2640-fe0f", "construction_worker_woman_medium_dark_skin_tone": "1f477-1f3fe-200d-2640-fe0f", "female-construction-worker_dark_skin_tone": "1f477-1f3ff-200d-2640-fe0f", "construction_worker_woman_dark_skin_tone": 
"1f477-1f3ff-200d-2640-fe0f", "male-construction-worker_light_skin_tone": "1f477-1f3fb-200d-2642-fe0f", "construction_worker_man_light_skin_tone": "1f477-1f3fb-200d-2642-fe0f", "male-construction-worker_medium_light_skin_tone": "1f477-1f3fc-200d-2642-fe0f", "construction_worker_man_medium_light_skin_tone": "1f477-1f3fc-200d-2642-fe0f", "male-construction-worker_medium_skin_tone": "1f477-1f3fd-200d-2642-fe0f", "construction_worker_man_medium_skin_tone": "1f477-1f3fd-200d-2642-fe0f", "male-construction-worker_medium_dark_skin_tone": "1f477-1f3fe-200d-2642-fe0f", "construction_worker_man_medium_dark_skin_tone": "1f477-1f3fe-200d-2642-fe0f", "male-construction-worker_dark_skin_tone": "1f477-1f3ff-200d-2642-fe0f", "construction_worker_man_dark_skin_tone": "1f477-1f3ff-200d-2642-fe0f", "construction_worker_light_skin_tone": "1f477-1f3fb", "construction_worker_medium_light_skin_tone": "1f477-1f3fc", "construction_worker_medium_skin_tone": "1f477-1f3fd", "construction_worker_medium_dark_skin_tone": "1f477-1f3fe", "construction_worker_dark_skin_tone": "1f477-1f3ff", "princess_light_skin_tone": "1f478-1f3fb", "princess_medium_light_skin_tone": "1f478-1f3fc", "princess_medium_skin_tone": "1f478-1f3fd", "princess_medium_dark_skin_tone": "1f478-1f3fe", "princess_dark_skin_tone": "1f478-1f3ff", "angel_light_skin_tone": "1f47c-1f3fb", "angel_medium_light_skin_tone": "1f47c-1f3fc", "angel_medium_skin_tone": "1f47c-1f3fd", "angel_medium_dark_skin_tone": "1f47c-1f3fe", "angel_dark_skin_tone": "1f47c-1f3ff", "woman-tipping-hand_light_skin_tone": "1f481-1f3fb-200d-2640-fe0f", "tipping_hand_woman_light_skin_tone": "1f481-1f3fb-200d-2640-fe0f", "woman-tipping-hand_medium_light_skin_tone": "1f481-1f3fc-200d-2640-fe0f", "tipping_hand_woman_medium_light_skin_tone": "1f481-1f3fc-200d-2640-fe0f", "woman-tipping-hand_medium_skin_tone": "1f481-1f3fd-200d-2640-fe0f", "tipping_hand_woman_medium_skin_tone": "1f481-1f3fd-200d-2640-fe0f", "woman-tipping-hand_medium_dark_skin_tone": "1f481-1f3fe-200d-2640-fe0f", "tipping_hand_woman_medium_dark_skin_tone": "1f481-1f3fe-200d-2640-fe0f", "woman-tipping-hand_dark_skin_tone": "1f481-1f3ff-200d-2640-fe0f", "tipping_hand_woman_dark_skin_tone": "1f481-1f3ff-200d-2640-fe0f", "man-tipping-hand_light_skin_tone": "1f481-1f3fb-200d-2642-fe0f", "tipping_hand_man_light_skin_tone": "1f481-1f3fb-200d-2642-fe0f", "man-tipping-hand_medium_light_skin_tone": "1f481-1f3fc-200d-2642-fe0f", "tipping_hand_man_medium_light_skin_tone": "1f481-1f3fc-200d-2642-fe0f", "man-tipping-hand_medium_skin_tone": "1f481-1f3fd-200d-2642-fe0f", "tipping_hand_man_medium_skin_tone": "1f481-1f3fd-200d-2642-fe0f", "man-tipping-hand_medium_dark_skin_tone": "1f481-1f3fe-200d-2642-fe0f", "tipping_hand_man_medium_dark_skin_tone": "1f481-1f3fe-200d-2642-fe0f", "man-tipping-hand_dark_skin_tone": "1f481-1f3ff-200d-2642-fe0f", "tipping_hand_man_dark_skin_tone": "1f481-1f3ff-200d-2642-fe0f", "information_desk_person_light_skin_tone": "1f481-1f3fb", "information_desk_person_medium_light_skin_tone": "1f481-1f3fc", "information_desk_person_medium_skin_tone": "1f481-1f3fd", "information_desk_person_medium_dark_skin_tone": "1f481-1f3fe", "information_desk_person_dark_skin_tone": "1f481-1f3ff", "female-guard_light_skin_tone": "1f482-1f3fb-200d-2640-fe0f", "guardswoman_light_skin_tone": "1f482-1f3fb-200d-2640-fe0f", "female-guard_medium_light_skin_tone": "1f482-1f3fc-200d-2640-fe0f", "guardswoman_medium_light_skin_tone": "1f482-1f3fc-200d-2640-fe0f", "female-guard_medium_skin_tone": "1f482-1f3fd-200d-2640-fe0f", 
"guardswoman_medium_skin_tone": "1f482-1f3fd-200d-2640-fe0f", "female-guard_medium_dark_skin_tone": "1f482-1f3fe-200d-2640-fe0f", "guardswoman_medium_dark_skin_tone": "1f482-1f3fe-200d-2640-fe0f", "female-guard_dark_skin_tone": "1f482-1f3ff-200d-2640-fe0f", "guardswoman_dark_skin_tone": "1f482-1f3ff-200d-2640-fe0f", "male-guard_light_skin_tone": "1f482-1f3fb-200d-2642-fe0f", "male-guard_medium_light_skin_tone": "1f482-1f3fc-200d-2642-fe0f", "male-guard_medium_skin_tone": "1f482-1f3fd-200d-2642-fe0f", "male-guard_medium_dark_skin_tone": "1f482-1f3fe-200d-2642-fe0f", "male-guard_dark_skin_tone": "1f482-1f3ff-200d-2642-fe0f", "guardsman_light_skin_tone": "1f482-1f3fb", "guardsman_medium_light_skin_tone": "1f482-1f3fc", "guardsman_medium_skin_tone": "1f482-1f3fd", "guardsman_medium_dark_skin_tone": "1f482-1f3fe", "guardsman_dark_skin_tone": "1f482-1f3ff", "dancer_light_skin_tone": "1f483-1f3fb", "dancer_medium_light_skin_tone": "1f483-1f3fc", "dancer_medium_skin_tone": "1f483-1f3fd", "dancer_medium_dark_skin_tone": "1f483-1f3fe", "dancer_dark_skin_tone": "1f483-1f3ff", "nail_care_light_skin_tone": "1f485-1f3fb", "nail_care_medium_light_skin_tone": "1f485-1f3fc", "nail_care_medium_skin_tone": "1f485-1f3fd", "nail_care_medium_dark_skin_tone": "1f485-1f3fe", "nail_care_dark_skin_tone": "1f485-1f3ff", "woman-getting-massage_light_skin_tone": "1f486-1f3fb-200d-2640-fe0f", "massage_woman_light_skin_tone": "1f486-1f3fb-200d-2640-fe0f", "woman-getting-massage_medium_light_skin_tone": "1f486-1f3fc-200d-2640-fe0f", "massage_woman_medium_light_skin_tone": "1f486-1f3fc-200d-2640-fe0f", "woman-getting-massage_medium_skin_tone": "1f486-1f3fd-200d-2640-fe0f", "massage_woman_medium_skin_tone": "1f486-1f3fd-200d-2640-fe0f", "woman-getting-massage_medium_dark_skin_tone": "1f486-1f3fe-200d-2640-fe0f", "massage_woman_medium_dark_skin_tone": "1f486-1f3fe-200d-2640-fe0f", "woman-getting-massage_dark_skin_tone": "1f486-1f3ff-200d-2640-fe0f", "massage_woman_dark_skin_tone": "1f486-1f3ff-200d-2640-fe0f", "man-getting-massage_light_skin_tone": "1f486-1f3fb-200d-2642-fe0f", "massage_man_light_skin_tone": "1f486-1f3fb-200d-2642-fe0f", "man-getting-massage_medium_light_skin_tone": "1f486-1f3fc-200d-2642-fe0f", "massage_man_medium_light_skin_tone": "1f486-1f3fc-200d-2642-fe0f", "man-getting-massage_medium_skin_tone": "1f486-1f3fd-200d-2642-fe0f", "massage_man_medium_skin_tone": "1f486-1f3fd-200d-2642-fe0f", "man-getting-massage_medium_dark_skin_tone": "1f486-1f3fe-200d-2642-fe0f", "massage_man_medium_dark_skin_tone": "1f486-1f3fe-200d-2642-fe0f", "man-getting-massage_dark_skin_tone": "1f486-1f3ff-200d-2642-fe0f", "massage_man_dark_skin_tone": "1f486-1f3ff-200d-2642-fe0f", "massage_light_skin_tone": "1f486-1f3fb", "massage_medium_light_skin_tone": "1f486-1f3fc", "massage_medium_skin_tone": "1f486-1f3fd", "massage_medium_dark_skin_tone": "1f486-1f3fe", "massage_dark_skin_tone": "1f486-1f3ff", "woman-getting-haircut_light_skin_tone": "1f487-1f3fb-200d-2640-fe0f", "haircut_woman_light_skin_tone": "1f487-1f3fb-200d-2640-fe0f", "woman-getting-haircut_medium_light_skin_tone": "1f487-1f3fc-200d-2640-fe0f", "haircut_woman_medium_light_skin_tone": "1f487-1f3fc-200d-2640-fe0f", "woman-getting-haircut_medium_skin_tone": "1f487-1f3fd-200d-2640-fe0f", "haircut_woman_medium_skin_tone": "1f487-1f3fd-200d-2640-fe0f", "woman-getting-haircut_medium_dark_skin_tone": "1f487-1f3fe-200d-2640-fe0f", "haircut_woman_medium_dark_skin_tone": "1f487-1f3fe-200d-2640-fe0f", "woman-getting-haircut_dark_skin_tone": "1f487-1f3ff-200d-2640-fe0f", 
"haircut_woman_dark_skin_tone": "1f487-1f3ff-200d-2640-fe0f", "man-getting-haircut_light_skin_tone": "1f487-1f3fb-200d-2642-fe0f", "haircut_man_light_skin_tone": "1f487-1f3fb-200d-2642-fe0f", "man-getting-haircut_medium_light_skin_tone": "1f487-1f3fc-200d-2642-fe0f", "haircut_man_medium_light_skin_tone": "1f487-1f3fc-200d-2642-fe0f", "man-getting-haircut_medium_skin_tone": "1f487-1f3fd-200d-2642-fe0f", "haircut_man_medium_skin_tone": "1f487-1f3fd-200d-2642-fe0f", "man-getting-haircut_medium_dark_skin_tone": "1f487-1f3fe-200d-2642-fe0f", "haircut_man_medium_dark_skin_tone": "1f487-1f3fe-200d-2642-fe0f", "man-getting-haircut_dark_skin_tone": "1f487-1f3ff-200d-2642-fe0f", "haircut_man_dark_skin_tone": "1f487-1f3ff-200d-2642-fe0f", "haircut_light_skin_tone": "1f487-1f3fb", "haircut_medium_light_skin_tone": "1f487-1f3fc", "haircut_medium_skin_tone": "1f487-1f3fd", "haircut_medium_dark_skin_tone": "1f487-1f3fe", "haircut_dark_skin_tone": "1f487-1f3ff", "muscle_light_skin_tone": "1f4aa-1f3fb", "muscle_medium_light_skin_tone": "1f4aa-1f3fc", "muscle_medium_skin_tone": "1f4aa-1f3fd", "muscle_medium_dark_skin_tone": "1f4aa-1f3fe", "muscle_dark_skin_tone": "1f4aa-1f3ff", "man_in_business_suit_levitating_light_skin_tone": "1f574-1f3fb", "business_suit_levitating_light_skin_tone": "1f574-1f3fb", "man_in_business_suit_levitating_medium_light_skin_tone": "1f574-1f3fc", "business_suit_levitating_medium_light_skin_tone": "1f574-1f3fc", "man_in_business_suit_levitating_medium_skin_tone": "1f574-1f3fd", "business_suit_levitating_medium_skin_tone": "1f574-1f3fd", "man_in_business_suit_levitating_medium_dark_skin_tone": "1f574-1f3fe", "business_suit_levitating_medium_dark_skin_tone": "1f574-1f3fe", "man_in_business_suit_levitating_dark_skin_tone": "1f574-1f3ff", "business_suit_levitating_dark_skin_tone": "1f574-1f3ff", "female-detective_light_skin_tone": "1f575-1f3fb-200d-2640-fe0f", "female_detective_light_skin_tone": "1f575-1f3fb-200d-2640-fe0f", "female-detective_medium_light_skin_tone": "1f575-1f3fc-200d-2640-fe0f", "female_detective_medium_light_skin_tone": "1f575-1f3fc-200d-2640-fe0f", "female-detective_medium_skin_tone": "1f575-1f3fd-200d-2640-fe0f", "female_detective_medium_skin_tone": "1f575-1f3fd-200d-2640-fe0f", "female-detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2640-fe0f", "female_detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2640-fe0f", "female-detective_dark_skin_tone": "1f575-1f3ff-200d-2640-fe0f", "female_detective_dark_skin_tone": "1f575-1f3ff-200d-2640-fe0f", "male-detective_light_skin_tone": "1f575-1f3fb-200d-2642-fe0f", "male_detective_light_skin_tone": "1f575-1f3fb-200d-2642-fe0f", "male-detective_medium_light_skin_tone": "1f575-1f3fc-200d-2642-fe0f", "male_detective_medium_light_skin_tone": "1f575-1f3fc-200d-2642-fe0f", "male-detective_medium_skin_tone": "1f575-1f3fd-200d-2642-fe0f", "male_detective_medium_skin_tone": "1f575-1f3fd-200d-2642-fe0f", "male-detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2642-fe0f", "male_detective_medium_dark_skin_tone": "1f575-1f3fe-200d-2642-fe0f", "male-detective_dark_skin_tone": "1f575-1f3ff-200d-2642-fe0f", "male_detective_dark_skin_tone": "1f575-1f3ff-200d-2642-fe0f", "sleuth_or_spy_light_skin_tone": "1f575-1f3fb", "sleuth_or_spy_medium_light_skin_tone": "1f575-1f3fc", "sleuth_or_spy_medium_skin_tone": "1f575-1f3fd", "sleuth_or_spy_medium_dark_skin_tone": "1f575-1f3fe", "sleuth_or_spy_dark_skin_tone": "1f575-1f3ff", "man_dancing_light_skin_tone": "1f57a-1f3fb", "man_dancing_medium_light_skin_tone": "1f57a-1f3fc", 
"man_dancing_medium_skin_tone": "1f57a-1f3fd", "man_dancing_medium_dark_skin_tone": "1f57a-1f3fe", "man_dancing_dark_skin_tone": "1f57a-1f3ff", "raised_hand_with_fingers_splayed_light_skin_tone": "1f590-1f3fb", "raised_hand_with_fingers_splayed_medium_light_skin_tone": "1f590-1f3fc", "raised_hand_with_fingers_splayed_medium_skin_tone": "1f590-1f3fd", "raised_hand_with_fingers_splayed_medium_dark_skin_tone": "1f590-1f3fe", "raised_hand_with_fingers_splayed_dark_skin_tone": "1f590-1f3ff", "middle_finger_light_skin_tone": "1f595-1f3fb", "reversed_hand_with_middle_finger_extended_light_skin_tone": "1f595-1f3fb", "middle_finger_medium_light_skin_tone": "1f595-1f3fc", "reversed_hand_with_middle_finger_extended_medium_light_skin_tone": "1f595-1f3fc", "middle_finger_medium_skin_tone": "1f595-1f3fd", "reversed_hand_with_middle_finger_extended_medium_skin_tone": "1f595-1f3fd", "middle_finger_medium_dark_skin_tone": "1f595-1f3fe", "reversed_hand_with_middle_finger_extended_medium_dark_skin_tone": "1f595-1f3fe", "middle_finger_dark_skin_tone": "1f595-1f3ff", "reversed_hand_with_middle_finger_extended_dark_skin_tone": "1f595-1f3ff", "spock-hand_light_skin_tone": "1f596-1f3fb", "vulcan_salute_light_skin_tone": "1f596-1f3fb", "spock-hand_medium_light_skin_tone": "1f596-1f3fc", "vulcan_salute_medium_light_skin_tone": "1f596-1f3fc", "spock-hand_medium_skin_tone": "1f596-1f3fd", "vulcan_salute_medium_skin_tone": "1f596-1f3fd", "spock-hand_medium_dark_skin_tone": "1f596-1f3fe", "vulcan_salute_medium_dark_skin_tone": "1f596-1f3fe", "spock-hand_dark_skin_tone": "1f596-1f3ff", "vulcan_salute_dark_skin_tone": "1f596-1f3ff", "woman-gesturing-no_light_skin_tone": "1f645-1f3fb-200d-2640-fe0f", "no_good_woman_light_skin_tone": "1f645-1f3fb-200d-2640-fe0f", "woman-gesturing-no_medium_light_skin_tone": "1f645-1f3fc-200d-2640-fe0f", "no_good_woman_medium_light_skin_tone": "1f645-1f3fc-200d-2640-fe0f", "woman-gesturing-no_medium_skin_tone": "1f645-1f3fd-200d-2640-fe0f", "no_good_woman_medium_skin_tone": "1f645-1f3fd-200d-2640-fe0f", "woman-gesturing-no_medium_dark_skin_tone": "1f645-1f3fe-200d-2640-fe0f", "no_good_woman_medium_dark_skin_tone": "1f645-1f3fe-200d-2640-fe0f", "woman-gesturing-no_dark_skin_tone": "1f645-1f3ff-200d-2640-fe0f", "no_good_woman_dark_skin_tone": "1f645-1f3ff-200d-2640-fe0f", "man-gesturing-no_light_skin_tone": "1f645-1f3fb-200d-2642-fe0f", "no_good_man_light_skin_tone": "1f645-1f3fb-200d-2642-fe0f", "man-gesturing-no_medium_light_skin_tone": "1f645-1f3fc-200d-2642-fe0f", "no_good_man_medium_light_skin_tone": "1f645-1f3fc-200d-2642-fe0f", "man-gesturing-no_medium_skin_tone": "1f645-1f3fd-200d-2642-fe0f", "no_good_man_medium_skin_tone": "1f645-1f3fd-200d-2642-fe0f", "man-gesturing-no_medium_dark_skin_tone": "1f645-1f3fe-200d-2642-fe0f", "no_good_man_medium_dark_skin_tone": "1f645-1f3fe-200d-2642-fe0f", "man-gesturing-no_dark_skin_tone": "1f645-1f3ff-200d-2642-fe0f", "no_good_man_dark_skin_tone": "1f645-1f3ff-200d-2642-fe0f", "no_good_light_skin_tone": "1f645-1f3fb", "no_good_medium_light_skin_tone": "1f645-1f3fc", "no_good_medium_skin_tone": "1f645-1f3fd", "no_good_medium_dark_skin_tone": "1f645-1f3fe", "no_good_dark_skin_tone": "1f645-1f3ff", "woman-gesturing-ok_light_skin_tone": "1f646-1f3fb-200d-2640-fe0f", "woman-gesturing-ok_medium_light_skin_tone": "1f646-1f3fc-200d-2640-fe0f", "woman-gesturing-ok_medium_skin_tone": "1f646-1f3fd-200d-2640-fe0f", "woman-gesturing-ok_medium_dark_skin_tone": "1f646-1f3fe-200d-2640-fe0f", "woman-gesturing-ok_dark_skin_tone": "1f646-1f3ff-200d-2640-fe0f", 
"man-gesturing-ok_light_skin_tone": "1f646-1f3fb-200d-2642-fe0f", "ok_man_light_skin_tone": "1f646-1f3fb-200d-2642-fe0f", "man-gesturing-ok_medium_light_skin_tone": "1f646-1f3fc-200d-2642-fe0f", "ok_man_medium_light_skin_tone": "1f646-1f3fc-200d-2642-fe0f", "man-gesturing-ok_medium_skin_tone": "1f646-1f3fd-200d-2642-fe0f", "ok_man_medium_skin_tone": "1f646-1f3fd-200d-2642-fe0f", "man-gesturing-ok_medium_dark_skin_tone": "1f646-1f3fe-200d-2642-fe0f", "ok_man_medium_dark_skin_tone": "1f646-1f3fe-200d-2642-fe0f", "man-gesturing-ok_dark_skin_tone": "1f646-1f3ff-200d-2642-fe0f", "ok_man_dark_skin_tone": "1f646-1f3ff-200d-2642-fe0f", "ok_woman_light_skin_tone": "1f646-1f3fb", "ok_woman_medium_light_skin_tone": "1f646-1f3fc", "ok_woman_medium_skin_tone": "1f646-1f3fd", "ok_woman_medium_dark_skin_tone": "1f646-1f3fe", "ok_woman_dark_skin_tone": "1f646-1f3ff", "woman-bowing_light_skin_tone": "1f647-1f3fb-200d-2640-fe0f", "bowing_woman_light_skin_tone": "1f647-1f3fb-200d-2640-fe0f", "woman-bowing_medium_light_skin_tone": "1f647-1f3fc-200d-2640-fe0f", "bowing_woman_medium_light_skin_tone": "1f647-1f3fc-200d-2640-fe0f", "woman-bowing_medium_skin_tone": "1f647-1f3fd-200d-2640-fe0f", "bowing_woman_medium_skin_tone": "1f647-1f3fd-200d-2640-fe0f", "woman-bowing_medium_dark_skin_tone": "1f647-1f3fe-200d-2640-fe0f", "bowing_woman_medium_dark_skin_tone": "1f647-1f3fe-200d-2640-fe0f", "woman-bowing_dark_skin_tone": "1f647-1f3ff-200d-2640-fe0f", "bowing_woman_dark_skin_tone": "1f647-1f3ff-200d-2640-fe0f", "man-bowing_light_skin_tone": "1f647-1f3fb-200d-2642-fe0f", "bowing_man_light_skin_tone": "1f647-1f3fb-200d-2642-fe0f", "man-bowing_medium_light_skin_tone": "1f647-1f3fc-200d-2642-fe0f", "bowing_man_medium_light_skin_tone": "1f647-1f3fc-200d-2642-fe0f", "man-bowing_medium_skin_tone": "1f647-1f3fd-200d-2642-fe0f", "bowing_man_medium_skin_tone": "1f647-1f3fd-200d-2642-fe0f", "man-bowing_medium_dark_skin_tone": "1f647-1f3fe-200d-2642-fe0f", "bowing_man_medium_dark_skin_tone": "1f647-1f3fe-200d-2642-fe0f", "man-bowing_dark_skin_tone": "1f647-1f3ff-200d-2642-fe0f", "bowing_man_dark_skin_tone": "1f647-1f3ff-200d-2642-fe0f", "bow_light_skin_tone": "1f647-1f3fb", "bow_medium_light_skin_tone": "1f647-1f3fc", "bow_medium_skin_tone": "1f647-1f3fd", "bow_medium_dark_skin_tone": "1f647-1f3fe", "bow_dark_skin_tone": "1f647-1f3ff", "woman-raising-hand_light_skin_tone": "1f64b-1f3fb-200d-2640-fe0f", "raising_hand_woman_light_skin_tone": "1f64b-1f3fb-200d-2640-fe0f", "woman-raising-hand_medium_light_skin_tone": "1f64b-1f3fc-200d-2640-fe0f", "raising_hand_woman_medium_light_skin_tone": "1f64b-1f3fc-200d-2640-fe0f", "woman-raising-hand_medium_skin_tone": "1f64b-1f3fd-200d-2640-fe0f", "raising_hand_woman_medium_skin_tone": "1f64b-1f3fd-200d-2640-fe0f", "woman-raising-hand_medium_dark_skin_tone": "1f64b-1f3fe-200d-2640-fe0f", "raising_hand_woman_medium_dark_skin_tone": "1f64b-1f3fe-200d-2640-fe0f", "woman-raising-hand_dark_skin_tone": "1f64b-1f3ff-200d-2640-fe0f", "raising_hand_woman_dark_skin_tone": "1f64b-1f3ff-200d-2640-fe0f", "man-raising-hand_light_skin_tone": "1f64b-1f3fb-200d-2642-fe0f", "raising_hand_man_light_skin_tone": "1f64b-1f3fb-200d-2642-fe0f", "man-raising-hand_medium_light_skin_tone": "1f64b-1f3fc-200d-2642-fe0f", "raising_hand_man_medium_light_skin_tone": "1f64b-1f3fc-200d-2642-fe0f", "man-raising-hand_medium_skin_tone": "1f64b-1f3fd-200d-2642-fe0f", "raising_hand_man_medium_skin_tone": "1f64b-1f3fd-200d-2642-fe0f", "man-raising-hand_medium_dark_skin_tone": "1f64b-1f3fe-200d-2642-fe0f", 
"raising_hand_man_medium_dark_skin_tone": "1f64b-1f3fe-200d-2642-fe0f", "man-raising-hand_dark_skin_tone": "1f64b-1f3ff-200d-2642-fe0f", "raising_hand_man_dark_skin_tone": "1f64b-1f3ff-200d-2642-fe0f", "raising_hand_light_skin_tone": "1f64b-1f3fb", "raising_hand_medium_light_skin_tone": "1f64b-1f3fc", "raising_hand_medium_skin_tone": "1f64b-1f3fd", "raising_hand_medium_dark_skin_tone": "1f64b-1f3fe", "raising_hand_dark_skin_tone": "1f64b-1f3ff", "raised_hands_light_skin_tone": "1f64c-1f3fb", "raised_hands_medium_light_skin_tone": "1f64c-1f3fc", "raised_hands_medium_skin_tone": "1f64c-1f3fd", "raised_hands_medium_dark_skin_tone": "1f64c-1f3fe", "raised_hands_dark_skin_tone": "1f64c-1f3ff", "woman-frowning_light_skin_tone": "1f64d-1f3fb-200d-2640-fe0f", "frowning_woman_light_skin_tone": "1f64d-1f3fb-200d-2640-fe0f", "woman-frowning_medium_light_skin_tone": "1f64d-1f3fc-200d-2640-fe0f", "frowning_woman_medium_light_skin_tone": "1f64d-1f3fc-200d-2640-fe0f", "woman-frowning_medium_skin_tone": "1f64d-1f3fd-200d-2640-fe0f", "frowning_woman_medium_skin_tone": "1f64d-1f3fd-200d-2640-fe0f", "woman-frowning_medium_dark_skin_tone": "1f64d-1f3fe-200d-2640-fe0f", "frowning_woman_medium_dark_skin_tone": "1f64d-1f3fe-200d-2640-fe0f", "woman-frowning_dark_skin_tone": "1f64d-1f3ff-200d-2640-fe0f", "frowning_woman_dark_skin_tone": "1f64d-1f3ff-200d-2640-fe0f", "man-frowning_light_skin_tone": "1f64d-1f3fb-200d-2642-fe0f", "frowning_man_light_skin_tone": "1f64d-1f3fb-200d-2642-fe0f", "man-frowning_medium_light_skin_tone": "1f64d-1f3fc-200d-2642-fe0f", "frowning_man_medium_light_skin_tone": "1f64d-1f3fc-200d-2642-fe0f", "man-frowning_medium_skin_tone": "1f64d-1f3fd-200d-2642-fe0f", "frowning_man_medium_skin_tone": "1f64d-1f3fd-200d-2642-fe0f", "man-frowning_medium_dark_skin_tone": "1f64d-1f3fe-200d-2642-fe0f", "frowning_man_medium_dark_skin_tone": "1f64d-1f3fe-200d-2642-fe0f", "man-frowning_dark_skin_tone": "1f64d-1f3ff-200d-2642-fe0f", "frowning_man_dark_skin_tone": "1f64d-1f3ff-200d-2642-fe0f", "person_frowning_light_skin_tone": "1f64d-1f3fb", "person_frowning_medium_light_skin_tone": "1f64d-1f3fc", "person_frowning_medium_skin_tone": "1f64d-1f3fd", "person_frowning_medium_dark_skin_tone": "1f64d-1f3fe", "person_frowning_dark_skin_tone": "1f64d-1f3ff", "woman-pouting_light_skin_tone": "1f64e-1f3fb-200d-2640-fe0f", "pouting_woman_light_skin_tone": "1f64e-1f3fb-200d-2640-fe0f", "woman-pouting_medium_light_skin_tone": "1f64e-1f3fc-200d-2640-fe0f", "pouting_woman_medium_light_skin_tone": "1f64e-1f3fc-200d-2640-fe0f", "woman-pouting_medium_skin_tone": "1f64e-1f3fd-200d-2640-fe0f", "pouting_woman_medium_skin_tone": "1f64e-1f3fd-200d-2640-fe0f", "woman-pouting_medium_dark_skin_tone": "1f64e-1f3fe-200d-2640-fe0f", "pouting_woman_medium_dark_skin_tone": "1f64e-1f3fe-200d-2640-fe0f", "woman-pouting_dark_skin_tone": "1f64e-1f3ff-200d-2640-fe0f", "pouting_woman_dark_skin_tone": "1f64e-1f3ff-200d-2640-fe0f", "man-pouting_light_skin_tone": "1f64e-1f3fb-200d-2642-fe0f", "pouting_man_light_skin_tone": "1f64e-1f3fb-200d-2642-fe0f", "man-pouting_medium_light_skin_tone": "1f64e-1f3fc-200d-2642-fe0f", "pouting_man_medium_light_skin_tone": "1f64e-1f3fc-200d-2642-fe0f", "man-pouting_medium_skin_tone": "1f64e-1f3fd-200d-2642-fe0f", "pouting_man_medium_skin_tone": "1f64e-1f3fd-200d-2642-fe0f", "man-pouting_medium_dark_skin_tone": "1f64e-1f3fe-200d-2642-fe0f", "pouting_man_medium_dark_skin_tone": "1f64e-1f3fe-200d-2642-fe0f", "man-pouting_dark_skin_tone": "1f64e-1f3ff-200d-2642-fe0f", "pouting_man_dark_skin_tone": 
"1f64e-1f3ff-200d-2642-fe0f", "person_with_pouting_face_light_skin_tone": "1f64e-1f3fb", "person_with_pouting_face_medium_light_skin_tone": "1f64e-1f3fc", "person_with_pouting_face_medium_skin_tone": "1f64e-1f3fd", "person_with_pouting_face_medium_dark_skin_tone": "1f64e-1f3fe", "person_with_pouting_face_dark_skin_tone": "1f64e-1f3ff", "pray_light_skin_tone": "1f64f-1f3fb", "pray_medium_light_skin_tone": "1f64f-1f3fc", "pray_medium_skin_tone": "1f64f-1f3fd", "pray_medium_dark_skin_tone": "1f64f-1f3fe", "pray_dark_skin_tone": "1f64f-1f3ff", "woman-rowing-boat_light_skin_tone": "1f6a3-1f3fb-200d-2640-fe0f", "rowing_woman_light_skin_tone": "1f6a3-1f3fb-200d-2640-fe0f", "woman-rowing-boat_medium_light_skin_tone": "1f6a3-1f3fc-200d-2640-fe0f", "rowing_woman_medium_light_skin_tone": "1f6a3-1f3fc-200d-2640-fe0f", "woman-rowing-boat_medium_skin_tone": "1f6a3-1f3fd-200d-2640-fe0f", "rowing_woman_medium_skin_tone": "1f6a3-1f3fd-200d-2640-fe0f", "woman-rowing-boat_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2640-fe0f", "rowing_woman_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2640-fe0f", "woman-rowing-boat_dark_skin_tone": "1f6a3-1f3ff-200d-2640-fe0f", "rowing_woman_dark_skin_tone": "1f6a3-1f3ff-200d-2640-fe0f", "man-rowing-boat_light_skin_tone": "1f6a3-1f3fb-200d-2642-fe0f", "rowing_man_light_skin_tone": "1f6a3-1f3fb-200d-2642-fe0f", "man-rowing-boat_medium_light_skin_tone": "1f6a3-1f3fc-200d-2642-fe0f", "rowing_man_medium_light_skin_tone": "1f6a3-1f3fc-200d-2642-fe0f", "man-rowing-boat_medium_skin_tone": "1f6a3-1f3fd-200d-2642-fe0f", "rowing_man_medium_skin_tone": "1f6a3-1f3fd-200d-2642-fe0f", "man-rowing-boat_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2642-fe0f", "rowing_man_medium_dark_skin_tone": "1f6a3-1f3fe-200d-2642-fe0f", "man-rowing-boat_dark_skin_tone": "1f6a3-1f3ff-200d-2642-fe0f", "rowing_man_dark_skin_tone": "1f6a3-1f3ff-200d-2642-fe0f", "rowboat_light_skin_tone": "1f6a3-1f3fb", "rowboat_medium_light_skin_tone": "1f6a3-1f3fc", "rowboat_medium_skin_tone": "1f6a3-1f3fd", "rowboat_medium_dark_skin_tone": "1f6a3-1f3fe", "rowboat_dark_skin_tone": "1f6a3-1f3ff", "woman-biking_light_skin_tone": "1f6b4-1f3fb-200d-2640-fe0f", "biking_woman_light_skin_tone": "1f6b4-1f3fb-200d-2640-fe0f", "woman-biking_medium_light_skin_tone": "1f6b4-1f3fc-200d-2640-fe0f", "biking_woman_medium_light_skin_tone": "1f6b4-1f3fc-200d-2640-fe0f", "woman-biking_medium_skin_tone": "1f6b4-1f3fd-200d-2640-fe0f", "biking_woman_medium_skin_tone": "1f6b4-1f3fd-200d-2640-fe0f", "woman-biking_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2640-fe0f", "biking_woman_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2640-fe0f", "woman-biking_dark_skin_tone": "1f6b4-1f3ff-200d-2640-fe0f", "biking_woman_dark_skin_tone": "1f6b4-1f3ff-200d-2640-fe0f", "man-biking_light_skin_tone": "1f6b4-1f3fb-200d-2642-fe0f", "biking_man_light_skin_tone": "1f6b4-1f3fb-200d-2642-fe0f", "man-biking_medium_light_skin_tone": "1f6b4-1f3fc-200d-2642-fe0f", "biking_man_medium_light_skin_tone": "1f6b4-1f3fc-200d-2642-fe0f", "man-biking_medium_skin_tone": "1f6b4-1f3fd-200d-2642-fe0f", "biking_man_medium_skin_tone": "1f6b4-1f3fd-200d-2642-fe0f", "man-biking_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2642-fe0f", "biking_man_medium_dark_skin_tone": "1f6b4-1f3fe-200d-2642-fe0f", "man-biking_dark_skin_tone": "1f6b4-1f3ff-200d-2642-fe0f", "biking_man_dark_skin_tone": "1f6b4-1f3ff-200d-2642-fe0f", "bicyclist_light_skin_tone": "1f6b4-1f3fb", "bicyclist_medium_light_skin_tone": "1f6b4-1f3fc", "bicyclist_medium_skin_tone": "1f6b4-1f3fd", "bicyclist_medium_dark_skin_tone": "1f6b4-1f3fe", 
"bicyclist_dark_skin_tone": "1f6b4-1f3ff", "woman-mountain-biking_light_skin_tone": "1f6b5-1f3fb-200d-2640-fe0f", "mountain_biking_woman_light_skin_tone": "1f6b5-1f3fb-200d-2640-fe0f", "woman-mountain-biking_medium_light_skin_tone": "1f6b5-1f3fc-200d-2640-fe0f", "mountain_biking_woman_medium_light_skin_tone": "1f6b5-1f3fc-200d-2640-fe0f", "woman-mountain-biking_medium_skin_tone": "1f6b5-1f3fd-200d-2640-fe0f", "mountain_biking_woman_medium_skin_tone": "1f6b5-1f3fd-200d-2640-fe0f", "woman-mountain-biking_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2640-fe0f", "mountain_biking_woman_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2640-fe0f", "woman-mountain-biking_dark_skin_tone": "1f6b5-1f3ff-200d-2640-fe0f", "mountain_biking_woman_dark_skin_tone": "1f6b5-1f3ff-200d-2640-fe0f", "man-mountain-biking_light_skin_tone": "1f6b5-1f3fb-200d-2642-fe0f", "mountain_biking_man_light_skin_tone": "1f6b5-1f3fb-200d-2642-fe0f", "man-mountain-biking_medium_light_skin_tone": "1f6b5-1f3fc-200d-2642-fe0f", "mountain_biking_man_medium_light_skin_tone": "1f6b5-1f3fc-200d-2642-fe0f", "man-mountain-biking_medium_skin_tone": "1f6b5-1f3fd-200d-2642-fe0f", "mountain_biking_man_medium_skin_tone": "1f6b5-1f3fd-200d-2642-fe0f", "man-mountain-biking_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2642-fe0f", "mountain_biking_man_medium_dark_skin_tone": "1f6b5-1f3fe-200d-2642-fe0f", "man-mountain-biking_dark_skin_tone": "1f6b5-1f3ff-200d-2642-fe0f", "mountain_biking_man_dark_skin_tone": "1f6b5-1f3ff-200d-2642-fe0f", "mountain_bicyclist_light_skin_tone": "1f6b5-1f3fb", "mountain_bicyclist_medium_light_skin_tone": "1f6b5-1f3fc", "mountain_bicyclist_medium_skin_tone": "1f6b5-1f3fd", "mountain_bicyclist_medium_dark_skin_tone": "1f6b5-1f3fe", "mountain_bicyclist_dark_skin_tone": "1f6b5-1f3ff", "woman-walking_light_skin_tone": "1f6b6-1f3fb-200d-2640-fe0f", "walking_woman_light_skin_tone": "1f6b6-1f3fb-200d-2640-fe0f", "woman-walking_medium_light_skin_tone": "1f6b6-1f3fc-200d-2640-fe0f", "walking_woman_medium_light_skin_tone": "1f6b6-1f3fc-200d-2640-fe0f", "woman-walking_medium_skin_tone": "1f6b6-1f3fd-200d-2640-fe0f", "walking_woman_medium_skin_tone": "1f6b6-1f3fd-200d-2640-fe0f", "woman-walking_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2640-fe0f", "walking_woman_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2640-fe0f", "woman-walking_dark_skin_tone": "1f6b6-1f3ff-200d-2640-fe0f", "walking_woman_dark_skin_tone": "1f6b6-1f3ff-200d-2640-fe0f", "man-walking_light_skin_tone": "1f6b6-1f3fb-200d-2642-fe0f", "walking_man_light_skin_tone": "1f6b6-1f3fb-200d-2642-fe0f", "man-walking_medium_light_skin_tone": "1f6b6-1f3fc-200d-2642-fe0f", "walking_man_medium_light_skin_tone": "1f6b6-1f3fc-200d-2642-fe0f", "man-walking_medium_skin_tone": "1f6b6-1f3fd-200d-2642-fe0f", "walking_man_medium_skin_tone": "1f6b6-1f3fd-200d-2642-fe0f", "man-walking_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2642-fe0f", "walking_man_medium_dark_skin_tone": "1f6b6-1f3fe-200d-2642-fe0f", "man-walking_dark_skin_tone": "1f6b6-1f3ff-200d-2642-fe0f", "walking_man_dark_skin_tone": "1f6b6-1f3ff-200d-2642-fe0f", "walking_light_skin_tone": "1f6b6-1f3fb", "walking_medium_light_skin_tone": "1f6b6-1f3fc", "walking_medium_skin_tone": "1f6b6-1f3fd", "walking_medium_dark_skin_tone": "1f6b6-1f3fe", "walking_dark_skin_tone": "1f6b6-1f3ff", "bath_light_skin_tone": "1f6c0-1f3fb", "bath_medium_light_skin_tone": "1f6c0-1f3fc", "bath_medium_skin_tone": "1f6c0-1f3fd", "bath_medium_dark_skin_tone": "1f6c0-1f3fe", "bath_dark_skin_tone": "1f6c0-1f3ff", "sleeping_accommodation_light_skin_tone": "1f6cc-1f3fb", 
"sleeping_accommodation_medium_light_skin_tone": "1f6cc-1f3fc", "sleeping_accommodation_medium_skin_tone": "1f6cc-1f3fd", "sleeping_accommodation_medium_dark_skin_tone": "1f6cc-1f3fe", "sleeping_accommodation_dark_skin_tone": "1f6cc-1f3ff", "pinched_fingers_light_skin_tone": "1f90c-1f3fb", "pinched_fingers_medium_light_skin_tone": "1f90c-1f3fc", "pinched_fingers_medium_skin_tone": "1f90c-1f3fd", "pinched_fingers_medium_dark_skin_tone": "1f90c-1f3fe", "pinched_fingers_dark_skin_tone": "1f90c-1f3ff", "pinching_hand_light_skin_tone": "1f90f-1f3fb", "pinching_hand_medium_light_skin_tone": "1f90f-1f3fc", "pinching_hand_medium_skin_tone": "1f90f-1f3fd", "pinching_hand_medium_dark_skin_tone": "1f90f-1f3fe", "pinching_hand_dark_skin_tone": "1f90f-1f3ff", "the_horns_light_skin_tone": "1f918-1f3fb", "sign_of_the_horns_light_skin_tone": "1f918-1f3fb", "metal_light_skin_tone": "1f918-1f3fb", "the_horns_medium_light_skin_tone": "1f918-1f3fc", "sign_of_the_horns_medium_light_skin_tone": "1f918-1f3fc", "metal_medium_light_skin_tone": "1f918-1f3fc", "the_horns_medium_skin_tone": "1f918-1f3fd", "sign_of_the_horns_medium_skin_tone": "1f918-1f3fd", "metal_medium_skin_tone": "1f918-1f3fd", "the_horns_medium_dark_skin_tone": "1f918-1f3fe", "sign_of_the_horns_medium_dark_skin_tone": "1f918-1f3fe", "metal_medium_dark_skin_tone": "1f918-1f3fe", "the_horns_dark_skin_tone": "1f918-1f3ff", "sign_of_the_horns_dark_skin_tone": "1f918-1f3ff", "metal_dark_skin_tone": "1f918-1f3ff", "call_me_hand_light_skin_tone": "1f919-1f3fb", "call_me_hand_medium_light_skin_tone": "1f919-1f3fc", "call_me_hand_medium_skin_tone": "1f919-1f3fd", "call_me_hand_medium_dark_skin_tone": "1f919-1f3fe", "call_me_hand_dark_skin_tone": "1f919-1f3ff", "raised_back_of_hand_light_skin_tone": "1f91a-1f3fb", "raised_back_of_hand_medium_light_skin_tone": "1f91a-1f3fc", "raised_back_of_hand_medium_skin_tone": "1f91a-1f3fd", "raised_back_of_hand_medium_dark_skin_tone": "1f91a-1f3fe", "raised_back_of_hand_dark_skin_tone": "1f91a-1f3ff", "left-facing_fist_light_skin_tone": "1f91b-1f3fb", "fist_left_light_skin_tone": "1f91b-1f3fb", "left-facing_fist_medium_light_skin_tone": "1f91b-1f3fc", "fist_left_medium_light_skin_tone": "1f91b-1f3fc", "left-facing_fist_medium_skin_tone": "1f91b-1f3fd", "fist_left_medium_skin_tone": "1f91b-1f3fd", "left-facing_fist_medium_dark_skin_tone": "1f91b-1f3fe", "fist_left_medium_dark_skin_tone": "1f91b-1f3fe", "left-facing_fist_dark_skin_tone": "1f91b-1f3ff", "fist_left_dark_skin_tone": "1f91b-1f3ff", "right-facing_fist_light_skin_tone": "1f91c-1f3fb", "fist_right_light_skin_tone": "1f91c-1f3fb", "right-facing_fist_medium_light_skin_tone": "1f91c-1f3fc", "fist_right_medium_light_skin_tone": "1f91c-1f3fc", "right-facing_fist_medium_skin_tone": "1f91c-1f3fd", "fist_right_medium_skin_tone": "1f91c-1f3fd", "right-facing_fist_medium_dark_skin_tone": "1f91c-1f3fe", "fist_right_medium_dark_skin_tone": "1f91c-1f3fe", "right-facing_fist_dark_skin_tone": "1f91c-1f3ff", "fist_right_dark_skin_tone": "1f91c-1f3ff", "crossed_fingers_light_skin_tone": "1f91e-1f3fb", "hand_with_index_and_middle_fingers_crossed_light_skin_tone": "1f91e-1f3fb", "crossed_fingers_medium_light_skin_tone": "1f91e-1f3fc", "hand_with_index_and_middle_fingers_crossed_medium_light_skin_tone": "1f91e-1f3fc", "crossed_fingers_medium_skin_tone": "1f91e-1f3fd", "hand_with_index_and_middle_fingers_crossed_medium_skin_tone": "1f91e-1f3fd", "crossed_fingers_medium_dark_skin_tone": "1f91e-1f3fe", "hand_with_index_and_middle_fingers_crossed_medium_dark_skin_tone": "1f91e-1f3fe", 
"crossed_fingers_dark_skin_tone": "1f91e-1f3ff", "hand_with_index_and_middle_fingers_crossed_dark_skin_tone": "1f91e-1f3ff", "i_love_you_hand_sign_light_skin_tone": "1f91f-1f3fb", "i_love_you_hand_sign_medium_light_skin_tone": "1f91f-1f3fc", "i_love_you_hand_sign_medium_skin_tone": "1f91f-1f3fd", "i_love_you_hand_sign_medium_dark_skin_tone": "1f91f-1f3fe", "i_love_you_hand_sign_dark_skin_tone": "1f91f-1f3ff", "woman-facepalming_light_skin_tone": "1f926-1f3fb-200d-2640-fe0f", "woman_facepalming_light_skin_tone": "1f926-1f3fb-200d-2640-fe0f", "woman-facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2640-fe0f", "woman_facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2640-fe0f", "woman-facepalming_medium_skin_tone": "1f926-1f3fd-200d-2640-fe0f", "woman_facepalming_medium_skin_tone": "1f926-1f3fd-200d-2640-fe0f", "woman-facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2640-fe0f", "woman_facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2640-fe0f", "woman-facepalming_dark_skin_tone": "1f926-1f3ff-200d-2640-fe0f", "woman_facepalming_dark_skin_tone": "1f926-1f3ff-200d-2640-fe0f", "man-facepalming_light_skin_tone": "1f926-1f3fb-200d-2642-fe0f", "man_facepalming_light_skin_tone": "1f926-1f3fb-200d-2642-fe0f", "man-facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2642-fe0f", "man_facepalming_medium_light_skin_tone": "1f926-1f3fc-200d-2642-fe0f", "man-facepalming_medium_skin_tone": "1f926-1f3fd-200d-2642-fe0f", "man_facepalming_medium_skin_tone": "1f926-1f3fd-200d-2642-fe0f", "man-facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2642-fe0f", "man_facepalming_medium_dark_skin_tone": "1f926-1f3fe-200d-2642-fe0f", "man-facepalming_dark_skin_tone": "1f926-1f3ff-200d-2642-fe0f", "man_facepalming_dark_skin_tone": "1f926-1f3ff-200d-2642-fe0f", "face_palm_light_skin_tone": "1f926-1f3fb", "face_palm_medium_light_skin_tone": "1f926-1f3fc", "face_palm_medium_skin_tone": "1f926-1f3fd", "face_palm_medium_dark_skin_tone": "1f926-1f3fe", "face_palm_dark_skin_tone": "1f926-1f3ff", "pregnant_woman_light_skin_tone": "1f930-1f3fb", "pregnant_woman_medium_light_skin_tone": "1f930-1f3fc", "pregnant_woman_medium_skin_tone": "1f930-1f3fd", "pregnant_woman_medium_dark_skin_tone": "1f930-1f3fe", "pregnant_woman_dark_skin_tone": "1f930-1f3ff", "breast-feeding_light_skin_tone": "1f931-1f3fb", "breast-feeding_medium_light_skin_tone": "1f931-1f3fc", "breast-feeding_medium_skin_tone": "1f931-1f3fd", "breast-feeding_medium_dark_skin_tone": "1f931-1f3fe", "breast-feeding_dark_skin_tone": "1f931-1f3ff", "palms_up_together_light_skin_tone": "1f932-1f3fb", "palms_up_together_medium_light_skin_tone": "1f932-1f3fc", "palms_up_together_medium_skin_tone": "1f932-1f3fd", "palms_up_together_medium_dark_skin_tone": "1f932-1f3fe", "palms_up_together_dark_skin_tone": "1f932-1f3ff", "selfie_light_skin_tone": "1f933-1f3fb", "selfie_medium_light_skin_tone": "1f933-1f3fc", "selfie_medium_skin_tone": "1f933-1f3fd", "selfie_medium_dark_skin_tone": "1f933-1f3fe", "selfie_dark_skin_tone": "1f933-1f3ff", "prince_light_skin_tone": "1f934-1f3fb", "prince_medium_light_skin_tone": "1f934-1f3fc", "prince_medium_skin_tone": "1f934-1f3fd", "prince_medium_dark_skin_tone": "1f934-1f3fe", "prince_dark_skin_tone": "1f934-1f3ff", "woman_in_tuxedo_light_skin_tone": "1f935-1f3fb-200d-2640-fe0f", "woman_in_tuxedo_medium_light_skin_tone": "1f935-1f3fc-200d-2640-fe0f", "woman_in_tuxedo_medium_skin_tone": "1f935-1f3fd-200d-2640-fe0f", "woman_in_tuxedo_medium_dark_skin_tone": "1f935-1f3fe-200d-2640-fe0f", "woman_in_tuxedo_dark_skin_tone": 
"1f935-1f3ff-200d-2640-fe0f", "man_in_tuxedo_light_skin_tone": "1f935-1f3fb-200d-2642-fe0f", "man_in_tuxedo_medium_light_skin_tone": "1f935-1f3fc-200d-2642-fe0f", "man_in_tuxedo_medium_skin_tone": "1f935-1f3fd-200d-2642-fe0f", "man_in_tuxedo_medium_dark_skin_tone": "1f935-1f3fe-200d-2642-fe0f", "man_in_tuxedo_dark_skin_tone": "1f935-1f3ff-200d-2642-fe0f", "person_in_tuxedo_light_skin_tone": "1f935-1f3fb", "person_in_tuxedo_medium_light_skin_tone": "1f935-1f3fc", "person_in_tuxedo_medium_skin_tone": "1f935-1f3fd", "person_in_tuxedo_medium_dark_skin_tone": "1f935-1f3fe", "person_in_tuxedo_dark_skin_tone": "1f935-1f3ff", "mrs_claus_light_skin_tone": "1f936-1f3fb", "mother_christmas_light_skin_tone": "1f936-1f3fb", "mrs_claus_medium_light_skin_tone": "1f936-1f3fc", "mother_christmas_medium_light_skin_tone": "1f936-1f3fc", "mrs_claus_medium_skin_tone": "1f936-1f3fd", "mother_christmas_medium_skin_tone": "1f936-1f3fd", "mrs_claus_medium_dark_skin_tone": "1f936-1f3fe", "mother_christmas_medium_dark_skin_tone": "1f936-1f3fe", "mrs_claus_dark_skin_tone": "1f936-1f3ff", "mother_christmas_dark_skin_tone": "1f936-1f3ff", "woman-shrugging_light_skin_tone": "1f937-1f3fb-200d-2640-fe0f", "woman_shrugging_light_skin_tone": "1f937-1f3fb-200d-2640-fe0f", "woman-shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2640-fe0f", "woman_shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2640-fe0f", "woman-shrugging_medium_skin_tone": "1f937-1f3fd-200d-2640-fe0f", "woman_shrugging_medium_skin_tone": "1f937-1f3fd-200d-2640-fe0f", "woman-shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2640-fe0f", "woman_shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2640-fe0f", "woman-shrugging_dark_skin_tone": "1f937-1f3ff-200d-2640-fe0f", "woman_shrugging_dark_skin_tone": "1f937-1f3ff-200d-2640-fe0f", "man-shrugging_light_skin_tone": "1f937-1f3fb-200d-2642-fe0f", "man_shrugging_light_skin_tone": "1f937-1f3fb-200d-2642-fe0f", "man-shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2642-fe0f", "man_shrugging_medium_light_skin_tone": "1f937-1f3fc-200d-2642-fe0f", "man-shrugging_medium_skin_tone": "1f937-1f3fd-200d-2642-fe0f", "man_shrugging_medium_skin_tone": "1f937-1f3fd-200d-2642-fe0f", "man-shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2642-fe0f", "man_shrugging_medium_dark_skin_tone": "1f937-1f3fe-200d-2642-fe0f", "man-shrugging_dark_skin_tone": "1f937-1f3ff-200d-2642-fe0f", "man_shrugging_dark_skin_tone": "1f937-1f3ff-200d-2642-fe0f", "shrug_light_skin_tone": "1f937-1f3fb", "shrug_medium_light_skin_tone": "1f937-1f3fc", "shrug_medium_skin_tone": "1f937-1f3fd", "shrug_medium_dark_skin_tone": "1f937-1f3fe", "shrug_dark_skin_tone": "1f937-1f3ff", "woman-cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2640-fe0f", "woman_cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2640-fe0f", "woman-cartwheeling_medium_light_skin_tone": "1f938-1f3fc-200d-2640-fe0f", "woman_cartwheeling_medium_light_skin_tone": "1f938-1f3fc-200d-2640-fe0f", "woman-cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2640-fe0f", "woman_cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2640-fe0f", "woman-cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2640-fe0f", "woman_cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2640-fe0f", "woman-cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2640-fe0f", "woman_cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2640-fe0f", "man-cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2642-fe0f", "man_cartwheeling_light_skin_tone": "1f938-1f3fb-200d-2642-fe0f", "man-cartwheeling_medium_light_skin_tone": 
"1f938-1f3fc-200d-2642-fe0f", "man_cartwheeling_medium_light_skin_tone": "1f938-1f3fc-200d-2642-fe0f", "man-cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2642-fe0f", "man_cartwheeling_medium_skin_tone": "1f938-1f3fd-200d-2642-fe0f", "man-cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2642-fe0f", "man_cartwheeling_medium_dark_skin_tone": "1f938-1f3fe-200d-2642-fe0f", "man-cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2642-fe0f", "man_cartwheeling_dark_skin_tone": "1f938-1f3ff-200d-2642-fe0f", "person_doing_cartwheel_light_skin_tone": "1f938-1f3fb", "person_doing_cartwheel_medium_light_skin_tone": "1f938-1f3fc", "person_doing_cartwheel_medium_skin_tone": "1f938-1f3fd", "person_doing_cartwheel_medium_dark_skin_tone": "1f938-1f3fe", "person_doing_cartwheel_dark_skin_tone": "1f938-1f3ff", "woman-juggling_light_skin_tone": "1f939-1f3fb-200d-2640-fe0f", "woman_juggling_light_skin_tone": "1f939-1f3fb-200d-2640-fe0f", "woman-juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2640-fe0f", "woman_juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2640-fe0f", "woman-juggling_medium_skin_tone": "1f939-1f3fd-200d-2640-fe0f", "woman_juggling_medium_skin_tone": "1f939-1f3fd-200d-2640-fe0f", "woman-juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2640-fe0f", "woman_juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2640-fe0f", "woman-juggling_dark_skin_tone": "1f939-1f3ff-200d-2640-fe0f", "woman_juggling_dark_skin_tone": "1f939-1f3ff-200d-2640-fe0f", "man-juggling_light_skin_tone": "1f939-1f3fb-200d-2642-fe0f", "man_juggling_light_skin_tone": "1f939-1f3fb-200d-2642-fe0f", "man-juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2642-fe0f", "man_juggling_medium_light_skin_tone": "1f939-1f3fc-200d-2642-fe0f", "man-juggling_medium_skin_tone": "1f939-1f3fd-200d-2642-fe0f", "man_juggling_medium_skin_tone": "1f939-1f3fd-200d-2642-fe0f", "man-juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2642-fe0f", "man_juggling_medium_dark_skin_tone": "1f939-1f3fe-200d-2642-fe0f", "man-juggling_dark_skin_tone": "1f939-1f3ff-200d-2642-fe0f", "man_juggling_dark_skin_tone": "1f939-1f3ff-200d-2642-fe0f", "juggling_light_skin_tone": "1f939-1f3fb", "juggling_medium_light_skin_tone": "1f939-1f3fc", "juggling_medium_skin_tone": "1f939-1f3fd", "juggling_medium_dark_skin_tone": "1f939-1f3fe", "juggling_dark_skin_tone": "1f939-1f3ff", "woman-playing-water-polo_light_skin_tone": "1f93d-1f3fb-200d-2640-fe0f", "woman_playing_water_polo_light_skin_tone": "1f93d-1f3fb-200d-2640-fe0f", "woman-playing-water-polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2640-fe0f", "woman_playing_water_polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2640-fe0f", "woman-playing-water-polo_medium_skin_tone": "1f93d-1f3fd-200d-2640-fe0f", "woman_playing_water_polo_medium_skin_tone": "1f93d-1f3fd-200d-2640-fe0f", "woman-playing-water-polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2640-fe0f", "woman_playing_water_polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2640-fe0f", "woman-playing-water-polo_dark_skin_tone": "1f93d-1f3ff-200d-2640-fe0f", "woman_playing_water_polo_dark_skin_tone": "1f93d-1f3ff-200d-2640-fe0f", "man-playing-water-polo_light_skin_tone": "1f93d-1f3fb-200d-2642-fe0f", "man_playing_water_polo_light_skin_tone": "1f93d-1f3fb-200d-2642-fe0f", "man-playing-water-polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2642-fe0f", "man_playing_water_polo_medium_light_skin_tone": "1f93d-1f3fc-200d-2642-fe0f", "man-playing-water-polo_medium_skin_tone": "1f93d-1f3fd-200d-2642-fe0f", "man_playing_water_polo_medium_skin_tone": 
"1f93d-1f3fd-200d-2642-fe0f", "man-playing-water-polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2642-fe0f", "man_playing_water_polo_medium_dark_skin_tone": "1f93d-1f3fe-200d-2642-fe0f", "man-playing-water-polo_dark_skin_tone": "1f93d-1f3ff-200d-2642-fe0f", "man_playing_water_polo_dark_skin_tone": "1f93d-1f3ff-200d-2642-fe0f", "water_polo_light_skin_tone": "1f93d-1f3fb", "water_polo_medium_light_skin_tone": "1f93d-1f3fc", "water_polo_medium_skin_tone": "1f93d-1f3fd", "water_polo_medium_dark_skin_tone": "1f93d-1f3fe", "water_polo_dark_skin_tone": "1f93d-1f3ff", "woman-playing-handball_light_skin_tone": "1f93e-1f3fb-200d-2640-fe0f", "woman_playing_handball_light_skin_tone": "1f93e-1f3fb-200d-2640-fe0f", "woman-playing-handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2640-fe0f", "woman_playing_handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2640-fe0f", "woman-playing-handball_medium_skin_tone": "1f93e-1f3fd-200d-2640-fe0f", "woman_playing_handball_medium_skin_tone": "1f93e-1f3fd-200d-2640-fe0f", "woman-playing-handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2640-fe0f", "woman_playing_handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2640-fe0f", "woman-playing-handball_dark_skin_tone": "1f93e-1f3ff-200d-2640-fe0f", "woman_playing_handball_dark_skin_tone": "1f93e-1f3ff-200d-2640-fe0f", "man-playing-handball_light_skin_tone": "1f93e-1f3fb-200d-2642-fe0f", "man_playing_handball_light_skin_tone": "1f93e-1f3fb-200d-2642-fe0f", "man-playing-handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2642-fe0f", "man_playing_handball_medium_light_skin_tone": "1f93e-1f3fc-200d-2642-fe0f", "man-playing-handball_medium_skin_tone": "1f93e-1f3fd-200d-2642-fe0f", "man_playing_handball_medium_skin_tone": "1f93e-1f3fd-200d-2642-fe0f", "man-playing-handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2642-fe0f", "man_playing_handball_medium_dark_skin_tone": "1f93e-1f3fe-200d-2642-fe0f", "man-playing-handball_dark_skin_tone": "1f93e-1f3ff-200d-2642-fe0f", "man_playing_handball_dark_skin_tone": "1f93e-1f3ff-200d-2642-fe0f", "handball_light_skin_tone": "1f93e-1f3fb", "handball_medium_light_skin_tone": "1f93e-1f3fc", "handball_medium_skin_tone": "1f93e-1f3fd", "handball_medium_dark_skin_tone": "1f93e-1f3fe", "handball_dark_skin_tone": "1f93e-1f3ff", "ninja_light_skin_tone": "1f977-1f3fb", "ninja_medium_light_skin_tone": "1f977-1f3fc", "ninja_medium_skin_tone": "1f977-1f3fd", "ninja_medium_dark_skin_tone": "1f977-1f3fe", "ninja_dark_skin_tone": "1f977-1f3ff", "leg_light_skin_tone": "1f9b5-1f3fb", "leg_medium_light_skin_tone": "1f9b5-1f3fc", "leg_medium_skin_tone": "1f9b5-1f3fd", "leg_medium_dark_skin_tone": "1f9b5-1f3fe", "leg_dark_skin_tone": "1f9b5-1f3ff", "foot_light_skin_tone": "1f9b6-1f3fb", "foot_medium_light_skin_tone": "1f9b6-1f3fc", "foot_medium_skin_tone": "1f9b6-1f3fd", "foot_medium_dark_skin_tone": "1f9b6-1f3fe", "foot_dark_skin_tone": "1f9b6-1f3ff", "female_superhero_light_skin_tone": "1f9b8-1f3fb-200d-2640-fe0f", "female_superhero_medium_light_skin_tone": "1f9b8-1f3fc-200d-2640-fe0f", "female_superhero_medium_skin_tone": "1f9b8-1f3fd-200d-2640-fe0f", "female_superhero_medium_dark_skin_tone": "1f9b8-1f3fe-200d-2640-fe0f", "female_superhero_dark_skin_tone": "1f9b8-1f3ff-200d-2640-fe0f", "male_superhero_light_skin_tone": "1f9b8-1f3fb-200d-2642-fe0f", "male_superhero_medium_light_skin_tone": "1f9b8-1f3fc-200d-2642-fe0f", "male_superhero_medium_skin_tone": "1f9b8-1f3fd-200d-2642-fe0f", "male_superhero_medium_dark_skin_tone": "1f9b8-1f3fe-200d-2642-fe0f", "male_superhero_dark_skin_tone": 
"1f9b8-1f3ff-200d-2642-fe0f", "superhero_light_skin_tone": "1f9b8-1f3fb", "superhero_medium_light_skin_tone": "1f9b8-1f3fc", "superhero_medium_skin_tone": "1f9b8-1f3fd", "superhero_medium_dark_skin_tone": "1f9b8-1f3fe", "superhero_dark_skin_tone": "1f9b8-1f3ff", "female_supervillain_light_skin_tone": "1f9b9-1f3fb-200d-2640-fe0f", "female_supervillain_medium_light_skin_tone": "1f9b9-1f3fc-200d-2640-fe0f", "female_supervillain_medium_skin_tone": "1f9b9-1f3fd-200d-2640-fe0f", "female_supervillain_medium_dark_skin_tone": "1f9b9-1f3fe-200d-2640-fe0f", "female_supervillain_dark_skin_tone": "1f9b9-1f3ff-200d-2640-fe0f", "male_supervillain_light_skin_tone": "1f9b9-1f3fb-200d-2642-fe0f", "male_supervillain_medium_light_skin_tone": "1f9b9-1f3fc-200d-2642-fe0f", "male_supervillain_medium_skin_tone": "1f9b9-1f3fd-200d-2642-fe0f", "male_supervillain_medium_dark_skin_tone": "1f9b9-1f3fe-200d-2642-fe0f", "male_supervillain_dark_skin_tone": "1f9b9-1f3ff-200d-2642-fe0f", "supervillain_light_skin_tone": "1f9b9-1f3fb", "supervillain_medium_light_skin_tone": "1f9b9-1f3fc", "supervillain_medium_skin_tone": "1f9b9-1f3fd", "supervillain_medium_dark_skin_tone": "1f9b9-1f3fe", "supervillain_dark_skin_tone": "1f9b9-1f3ff", "ear_with_hearing_aid_light_skin_tone": "1f9bb-1f3fb", "ear_with_hearing_aid_medium_light_skin_tone": "1f9bb-1f3fc", "ear_with_hearing_aid_medium_skin_tone": "1f9bb-1f3fd", "ear_with_hearing_aid_medium_dark_skin_tone": "1f9bb-1f3fe", "ear_with_hearing_aid_dark_skin_tone": "1f9bb-1f3ff", "woman_standing_light_skin_tone": "1f9cd-1f3fb-200d-2640-fe0f", "woman_standing_medium_light_skin_tone": "1f9cd-1f3fc-200d-2640-fe0f", "woman_standing_medium_skin_tone": "1f9cd-1f3fd-200d-2640-fe0f", "woman_standing_medium_dark_skin_tone": "1f9cd-1f3fe-200d-2640-fe0f", "woman_standing_dark_skin_tone": "1f9cd-1f3ff-200d-2640-fe0f", "man_standing_light_skin_tone": "1f9cd-1f3fb-200d-2642-fe0f", "man_standing_medium_light_skin_tone": "1f9cd-1f3fc-200d-2642-fe0f", "man_standing_medium_skin_tone": "1f9cd-1f3fd-200d-2642-fe0f", "man_standing_medium_dark_skin_tone": "1f9cd-1f3fe-200d-2642-fe0f", "man_standing_dark_skin_tone": "1f9cd-1f3ff-200d-2642-fe0f", "standing_person_light_skin_tone": "1f9cd-1f3fb", "standing_person_medium_light_skin_tone": "1f9cd-1f3fc", "standing_person_medium_skin_tone": "1f9cd-1f3fd", "standing_person_medium_dark_skin_tone": "1f9cd-1f3fe", "standing_person_dark_skin_tone": "1f9cd-1f3ff", "woman_kneeling_light_skin_tone": "1f9ce-1f3fb-200d-2640-fe0f", "woman_kneeling_medium_light_skin_tone": "1f9ce-1f3fc-200d-2640-fe0f", "woman_kneeling_medium_skin_tone": "1f9ce-1f3fd-200d-2640-fe0f", "woman_kneeling_medium_dark_skin_tone": "1f9ce-1f3fe-200d-2640-fe0f", "woman_kneeling_dark_skin_tone": "1f9ce-1f3ff-200d-2640-fe0f", "man_kneeling_light_skin_tone": "1f9ce-1f3fb-200d-2642-fe0f", "man_kneeling_medium_light_skin_tone": "1f9ce-1f3fc-200d-2642-fe0f", "man_kneeling_medium_skin_tone": "1f9ce-1f3fd-200d-2642-fe0f", "man_kneeling_medium_dark_skin_tone": "1f9ce-1f3fe-200d-2642-fe0f", "man_kneeling_dark_skin_tone": "1f9ce-1f3ff-200d-2642-fe0f", "kneeling_person_light_skin_tone": "1f9ce-1f3fb", "kneeling_person_medium_light_skin_tone": "1f9ce-1f3fc", "kneeling_person_medium_skin_tone": "1f9ce-1f3fd", "kneeling_person_medium_dark_skin_tone": "1f9ce-1f3fe", "kneeling_person_dark_skin_tone": "1f9ce-1f3ff", "deaf_woman_light_skin_tone": "1f9cf-1f3fb-200d-2640-fe0f", "deaf_woman_medium_light_skin_tone": "1f9cf-1f3fc-200d-2640-fe0f", "deaf_woman_medium_skin_tone": "1f9cf-1f3fd-200d-2640-fe0f", 
"deaf_woman_medium_dark_skin_tone": "1f9cf-1f3fe-200d-2640-fe0f", "deaf_woman_dark_skin_tone": "1f9cf-1f3ff-200d-2640-fe0f", "deaf_man_light_skin_tone": "1f9cf-1f3fb-200d-2642-fe0f", "deaf_man_medium_light_skin_tone": "1f9cf-1f3fc-200d-2642-fe0f", "deaf_man_medium_skin_tone": "1f9cf-1f3fd-200d-2642-fe0f", "deaf_man_medium_dark_skin_tone": "1f9cf-1f3fe-200d-2642-fe0f", "deaf_man_dark_skin_tone": "1f9cf-1f3ff-200d-2642-fe0f", "deaf_person_light_skin_tone": "1f9cf-1f3fb", "deaf_person_medium_light_skin_tone": "1f9cf-1f3fc", "deaf_person_medium_skin_tone": "1f9cf-1f3fd", "deaf_person_medium_dark_skin_tone": "1f9cf-1f3fe", "deaf_person_dark_skin_tone": "1f9cf-1f3ff", "farmer_light_skin_tone": "1f9d1-1f3fb-200d-1f33e", "farmer_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f33e", "farmer_medium_skin_tone": "1f9d1-1f3fd-200d-1f33e", "farmer_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f33e", "farmer_dark_skin_tone": "1f9d1-1f3ff-200d-1f33e", "cook_light_skin_tone": "1f9d1-1f3fb-200d-1f373", "cook_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f373", "cook_medium_skin_tone": "1f9d1-1f3fd-200d-1f373", "cook_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f373", "cook_dark_skin_tone": "1f9d1-1f3ff-200d-1f373", "person_feeding_baby_light_skin_tone": "1f9d1-1f3fb-200d-1f37c", "person_feeding_baby_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f37c", "person_feeding_baby_medium_skin_tone": "1f9d1-1f3fd-200d-1f37c", "person_feeding_baby_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f37c", "person_feeding_baby_dark_skin_tone": "1f9d1-1f3ff-200d-1f37c", "mx_claus_light_skin_tone": "1f9d1-1f3fb-200d-1f384", "mx_claus_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f384", "mx_claus_medium_skin_tone": "1f9d1-1f3fd-200d-1f384", "mx_claus_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f384", "mx_claus_dark_skin_tone": "1f9d1-1f3ff-200d-1f384", "student_light_skin_tone": "1f9d1-1f3fb-200d-1f393", "student_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f393", "student_medium_skin_tone": "1f9d1-1f3fd-200d-1f393", "student_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f393", "student_dark_skin_tone": "1f9d1-1f3ff-200d-1f393", "singer_light_skin_tone": "1f9d1-1f3fb-200d-1f3a4", "singer_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f3a4", "singer_medium_skin_tone": "1f9d1-1f3fd-200d-1f3a4", "singer_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f3a4", "singer_dark_skin_tone": "1f9d1-1f3ff-200d-1f3a4", "artist_light_skin_tone": "1f9d1-1f3fb-200d-1f3a8", "artist_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f3a8", "artist_medium_skin_tone": "1f9d1-1f3fd-200d-1f3a8", "artist_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f3a8", "artist_dark_skin_tone": "1f9d1-1f3ff-200d-1f3a8", "teacher_light_skin_tone": "1f9d1-1f3fb-200d-1f3eb", "teacher_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f3eb", "teacher_medium_skin_tone": "1f9d1-1f3fd-200d-1f3eb", "teacher_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f3eb", "teacher_dark_skin_tone": "1f9d1-1f3ff-200d-1f3eb", "factory_worker_light_skin_tone": "1f9d1-1f3fb-200d-1f3ed", "factory_worker_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f3ed", "factory_worker_medium_skin_tone": "1f9d1-1f3fd-200d-1f3ed", "factory_worker_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f3ed", "factory_worker_dark_skin_tone": "1f9d1-1f3ff-200d-1f3ed", "technologist_light_skin_tone": "1f9d1-1f3fb-200d-1f4bb", "technologist_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f4bb", "technologist_medium_skin_tone": "1f9d1-1f3fd-200d-1f4bb", "technologist_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f4bb", "technologist_dark_skin_tone": "1f9d1-1f3ff-200d-1f4bb", 
"office_worker_light_skin_tone": "1f9d1-1f3fb-200d-1f4bc", "office_worker_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f4bc", "office_worker_medium_skin_tone": "1f9d1-1f3fd-200d-1f4bc", "office_worker_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f4bc", "office_worker_dark_skin_tone": "1f9d1-1f3ff-200d-1f4bc", "mechanic_light_skin_tone": "1f9d1-1f3fb-200d-1f527", "mechanic_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f527", "mechanic_medium_skin_tone": "1f9d1-1f3fd-200d-1f527", "mechanic_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f527", "mechanic_dark_skin_tone": "1f9d1-1f3ff-200d-1f527", "scientist_light_skin_tone": "1f9d1-1f3fb-200d-1f52c", "scientist_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f52c", "scientist_medium_skin_tone": "1f9d1-1f3fd-200d-1f52c", "scientist_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f52c", "scientist_dark_skin_tone": "1f9d1-1f3ff-200d-1f52c", "astronaut_light_skin_tone": "1f9d1-1f3fb-200d-1f680", "astronaut_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f680", "astronaut_medium_skin_tone": "1f9d1-1f3fd-200d-1f680", "astronaut_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f680", "astronaut_dark_skin_tone": "1f9d1-1f3ff-200d-1f680", "firefighter_light_skin_tone": "1f9d1-1f3fb-200d-1f692", "firefighter_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f692", "firefighter_medium_skin_tone": "1f9d1-1f3fd-200d-1f692", "firefighter_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f692", "firefighter_dark_skin_tone": "1f9d1-1f3ff-200d-1f692", "people_holding_hands_light_skin_tone_light_skin_tone": "1f9d1-1f3fb-200d-1f91d-200d-1f9d1-1f3fb", "people_holding_hands_light_skin_tone_medium_light_skin_tone": "1f9d1-1f3fb-200d-1f91d-200d-1f9d1-1f3fc", "people_holding_hands_light_skin_tone_medium_skin_tone": "1f9d1-1f3fb-200d-1f91d-200d-1f9d1-1f3fd", "people_holding_hands_light_skin_tone_medium_dark_skin_tone": "1f9d1-1f3fb-200d-1f91d-200d-1f9d1-1f3fe", "people_holding_hands_light_skin_tone_dark_skin_tone": "1f9d1-1f3fb-200d-1f91d-200d-1f9d1-1f3ff", "people_holding_hands_medium_light_skin_tone_light_skin_tone": "1f9d1-1f3fc-200d-1f91d-200d-1f9d1-1f3fb", "people_holding_hands_medium_light_skin_tone_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f91d-200d-1f9d1-1f3fc", "people_holding_hands_medium_light_skin_tone_medium_skin_tone": "1f9d1-1f3fc-200d-1f91d-200d-1f9d1-1f3fd", "people_holding_hands_medium_light_skin_tone_medium_dark_skin_tone": "1f9d1-1f3fc-200d-1f91d-200d-1f9d1-1f3fe", "people_holding_hands_medium_light_skin_tone_dark_skin_tone": "1f9d1-1f3fc-200d-1f91d-200d-1f9d1-1f3ff", "people_holding_hands_medium_skin_tone_light_skin_tone": "1f9d1-1f3fd-200d-1f91d-200d-1f9d1-1f3fb", "people_holding_hands_medium_skin_tone_medium_light_skin_tone": "1f9d1-1f3fd-200d-1f91d-200d-1f9d1-1f3fc", "people_holding_hands_medium_skin_tone_medium_skin_tone": "1f9d1-1f3fd-200d-1f91d-200d-1f9d1-1f3fd", "people_holding_hands_medium_skin_tone_medium_dark_skin_tone": "1f9d1-1f3fd-200d-1f91d-200d-1f9d1-1f3fe", "people_holding_hands_medium_skin_tone_dark_skin_tone": "1f9d1-1f3fd-200d-1f91d-200d-1f9d1-1f3ff", "people_holding_hands_medium_dark_skin_tone_light_skin_tone": "1f9d1-1f3fe-200d-1f91d-200d-1f9d1-1f3fb", "people_holding_hands_medium_dark_skin_tone_medium_light_skin_tone": "1f9d1-1f3fe-200d-1f91d-200d-1f9d1-1f3fc", "people_holding_hands_medium_dark_skin_tone_medium_skin_tone": "1f9d1-1f3fe-200d-1f91d-200d-1f9d1-1f3fd", "people_holding_hands_medium_dark_skin_tone_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f91d-200d-1f9d1-1f3fe", "people_holding_hands_medium_dark_skin_tone_dark_skin_tone": 
"1f9d1-1f3fe-200d-1f91d-200d-1f9d1-1f3ff", "people_holding_hands_dark_skin_tone_light_skin_tone": "1f9d1-1f3ff-200d-1f91d-200d-1f9d1-1f3fb", "people_holding_hands_dark_skin_tone_medium_light_skin_tone": "1f9d1-1f3ff-200d-1f91d-200d-1f9d1-1f3fc", "people_holding_hands_dark_skin_tone_medium_skin_tone": "1f9d1-1f3ff-200d-1f91d-200d-1f9d1-1f3fd", "people_holding_hands_dark_skin_tone_medium_dark_skin_tone": "1f9d1-1f3ff-200d-1f91d-200d-1f9d1-1f3fe", "people_holding_hands_dark_skin_tone_dark_skin_tone": "1f9d1-1f3ff-200d-1f91d-200d-1f9d1-1f3ff", "person_with_probing_cane_light_skin_tone": "1f9d1-1f3fb-200d-1f9af", "person_with_probing_cane_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9af", "person_with_probing_cane_medium_skin_tone": "1f9d1-1f3fd-200d-1f9af", "person_with_probing_cane_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9af", "person_with_probing_cane_dark_skin_tone": "1f9d1-1f3ff-200d-1f9af", "red_haired_person_light_skin_tone": "1f9d1-1f3fb-200d-1f9b0", "red_haired_person_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9b0", "red_haired_person_medium_skin_tone": "1f9d1-1f3fd-200d-1f9b0", "red_haired_person_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9b0", "red_haired_person_dark_skin_tone": "1f9d1-1f3ff-200d-1f9b0", "curly_haired_person_light_skin_tone": "1f9d1-1f3fb-200d-1f9b1", "curly_haired_person_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9b1", "curly_haired_person_medium_skin_tone": "1f9d1-1f3fd-200d-1f9b1", "curly_haired_person_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9b1", "curly_haired_person_dark_skin_tone": "1f9d1-1f3ff-200d-1f9b1", "bald_person_light_skin_tone": "1f9d1-1f3fb-200d-1f9b2", "bald_person_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9b2", "bald_person_medium_skin_tone": "1f9d1-1f3fd-200d-1f9b2", "bald_person_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9b2", "bald_person_dark_skin_tone": "1f9d1-1f3ff-200d-1f9b2", "white_haired_person_light_skin_tone": "1f9d1-1f3fb-200d-1f9b3", "white_haired_person_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9b3", "white_haired_person_medium_skin_tone": "1f9d1-1f3fd-200d-1f9b3", "white_haired_person_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9b3", "white_haired_person_dark_skin_tone": "1f9d1-1f3ff-200d-1f9b3", "person_in_motorized_wheelchair_light_skin_tone": "1f9d1-1f3fb-200d-1f9bc", "person_in_motorized_wheelchair_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9bc", "person_in_motorized_wheelchair_medium_skin_tone": "1f9d1-1f3fd-200d-1f9bc", "person_in_motorized_wheelchair_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9bc", "person_in_motorized_wheelchair_dark_skin_tone": "1f9d1-1f3ff-200d-1f9bc", "person_in_manual_wheelchair_light_skin_tone": "1f9d1-1f3fb-200d-1f9bd", "person_in_manual_wheelchair_medium_light_skin_tone": "1f9d1-1f3fc-200d-1f9bd", "person_in_manual_wheelchair_medium_skin_tone": "1f9d1-1f3fd-200d-1f9bd", "person_in_manual_wheelchair_medium_dark_skin_tone": "1f9d1-1f3fe-200d-1f9bd", "person_in_manual_wheelchair_dark_skin_tone": "1f9d1-1f3ff-200d-1f9bd", "health_worker_light_skin_tone": "1f9d1-1f3fb-200d-2695-fe0f", "health_worker_medium_light_skin_tone": "1f9d1-1f3fc-200d-2695-fe0f", "health_worker_medium_skin_tone": "1f9d1-1f3fd-200d-2695-fe0f", "health_worker_medium_dark_skin_tone": "1f9d1-1f3fe-200d-2695-fe0f", "health_worker_dark_skin_tone": "1f9d1-1f3ff-200d-2695-fe0f", "judge_light_skin_tone": "1f9d1-1f3fb-200d-2696-fe0f", "judge_medium_light_skin_tone": "1f9d1-1f3fc-200d-2696-fe0f", "judge_medium_skin_tone": "1f9d1-1f3fd-200d-2696-fe0f", "judge_medium_dark_skin_tone": "1f9d1-1f3fe-200d-2696-fe0f", 
"judge_dark_skin_tone": "1f9d1-1f3ff-200d-2696-fe0f", "pilot_light_skin_tone": "1f9d1-1f3fb-200d-2708-fe0f", "pilot_medium_light_skin_tone": "1f9d1-1f3fc-200d-2708-fe0f", "pilot_medium_skin_tone": "1f9d1-1f3fd-200d-2708-fe0f", "pilot_medium_dark_skin_tone": "1f9d1-1f3fe-200d-2708-fe0f", "pilot_dark_skin_tone": "1f9d1-1f3ff-200d-2708-fe0f", "adult_light_skin_tone": "1f9d1-1f3fb", "adult_medium_light_skin_tone": "1f9d1-1f3fc", "adult_medium_skin_tone": "1f9d1-1f3fd", "adult_medium_dark_skin_tone": "1f9d1-1f3fe", "adult_dark_skin_tone": "1f9d1-1f3ff", "child_light_skin_tone": "1f9d2-1f3fb", "child_medium_light_skin_tone": "1f9d2-1f3fc", "child_medium_skin_tone": "1f9d2-1f3fd", "child_medium_dark_skin_tone": "1f9d2-1f3fe", "child_dark_skin_tone": "1f9d2-1f3ff", "older_adult_light_skin_tone": "1f9d3-1f3fb", "older_adult_medium_light_skin_tone": "1f9d3-1f3fc", "older_adult_medium_skin_tone": "1f9d3-1f3fd", "older_adult_medium_dark_skin_tone": "1f9d3-1f3fe", "older_adult_dark_skin_tone": "1f9d3-1f3ff", "bearded_person_light_skin_tone": "1f9d4-1f3fb", "bearded_person_medium_light_skin_tone": "1f9d4-1f3fc", "bearded_person_medium_skin_tone": "1f9d4-1f3fd", "bearded_person_medium_dark_skin_tone": "1f9d4-1f3fe", "bearded_person_dark_skin_tone": "1f9d4-1f3ff", "person_with_headscarf_light_skin_tone": "1f9d5-1f3fb", "person_with_headscarf_medium_light_skin_tone": "1f9d5-1f3fc", "person_with_headscarf_medium_skin_tone": "1f9d5-1f3fd", "person_with_headscarf_medium_dark_skin_tone": "1f9d5-1f3fe", "person_with_headscarf_dark_skin_tone": "1f9d5-1f3ff", "woman_in_steamy_room_light_skin_tone": "1f9d6-1f3fb-200d-2640-fe0f", "woman_in_steamy_room_medium_light_skin_tone": "1f9d6-1f3fc-200d-2640-fe0f", "woman_in_steamy_room_medium_skin_tone": "1f9d6-1f3fd-200d-2640-fe0f", "woman_in_steamy_room_medium_dark_skin_tone": "1f9d6-1f3fe-200d-2640-fe0f", "woman_in_steamy_room_dark_skin_tone": "1f9d6-1f3ff-200d-2640-fe0f", "man_in_steamy_room_light_skin_tone": "1f9d6-1f3fb-200d-2642-fe0f", "man_in_steamy_room_medium_light_skin_tone": "1f9d6-1f3fc-200d-2642-fe0f", "man_in_steamy_room_medium_skin_tone": "1f9d6-1f3fd-200d-2642-fe0f", "man_in_steamy_room_medium_dark_skin_tone": "1f9d6-1f3fe-200d-2642-fe0f", "man_in_steamy_room_dark_skin_tone": "1f9d6-1f3ff-200d-2642-fe0f", "person_in_steamy_room_light_skin_tone": "1f9d6-1f3fb", "person_in_steamy_room_medium_light_skin_tone": "1f9d6-1f3fc", "person_in_steamy_room_medium_skin_tone": "1f9d6-1f3fd", "person_in_steamy_room_medium_dark_skin_tone": "1f9d6-1f3fe", "person_in_steamy_room_dark_skin_tone": "1f9d6-1f3ff", "woman_climbing_light_skin_tone": "1f9d7-1f3fb-200d-2640-fe0f", "woman_climbing_medium_light_skin_tone": "1f9d7-1f3fc-200d-2640-fe0f", "woman_climbing_medium_skin_tone": "1f9d7-1f3fd-200d-2640-fe0f", "woman_climbing_medium_dark_skin_tone": "1f9d7-1f3fe-200d-2640-fe0f", "woman_climbing_dark_skin_tone": "1f9d7-1f3ff-200d-2640-fe0f", "man_climbing_light_skin_tone": "1f9d7-1f3fb-200d-2642-fe0f", "man_climbing_medium_light_skin_tone": "1f9d7-1f3fc-200d-2642-fe0f", "man_climbing_medium_skin_tone": "1f9d7-1f3fd-200d-2642-fe0f", "man_climbing_medium_dark_skin_tone": "1f9d7-1f3fe-200d-2642-fe0f", "man_climbing_dark_skin_tone": "1f9d7-1f3ff-200d-2642-fe0f", "person_climbing_light_skin_tone": "1f9d7-1f3fb", "person_climbing_medium_light_skin_tone": "1f9d7-1f3fc", "person_climbing_medium_skin_tone": "1f9d7-1f3fd", "person_climbing_medium_dark_skin_tone": "1f9d7-1f3fe", "person_climbing_dark_skin_tone": "1f9d7-1f3ff", "woman_in_lotus_position_light_skin_tone": 
"1f9d8-1f3fb-200d-2640-fe0f", "woman_in_lotus_position_medium_light_skin_tone": "1f9d8-1f3fc-200d-2640-fe0f", "woman_in_lotus_position_medium_skin_tone": "1f9d8-1f3fd-200d-2640-fe0f", "woman_in_lotus_position_medium_dark_skin_tone": "1f9d8-1f3fe-200d-2640-fe0f", "woman_in_lotus_position_dark_skin_tone": "1f9d8-1f3ff-200d-2640-fe0f", "man_in_lotus_position_light_skin_tone": "1f9d8-1f3fb-200d-2642-fe0f", "man_in_lotus_position_medium_light_skin_tone": "1f9d8-1f3fc-200d-2642-fe0f", "man_in_lotus_position_medium_skin_tone": "1f9d8-1f3fd-200d-2642-fe0f", "man_in_lotus_position_medium_dark_skin_tone": "1f9d8-1f3fe-200d-2642-fe0f", "man_in_lotus_position_dark_skin_tone": "1f9d8-1f3ff-200d-2642-fe0f", "person_in_lotus_position_light_skin_tone": "1f9d8-1f3fb", "person_in_lotus_position_medium_light_skin_tone": "1f9d8-1f3fc", "person_in_lotus_position_medium_skin_tone": "1f9d8-1f3fd", "person_in_lotus_position_medium_dark_skin_tone": "1f9d8-1f3fe", "person_in_lotus_position_dark_skin_tone": "1f9d8-1f3ff", "female_mage_light_skin_tone": "1f9d9-1f3fb-200d-2640-fe0f", "female_mage_medium_light_skin_tone": "1f9d9-1f3fc-200d-2640-fe0f", "female_mage_medium_skin_tone": "1f9d9-1f3fd-200d-2640-fe0f", "female_mage_medium_dark_skin_tone": "1f9d9-1f3fe-200d-2640-fe0f", "female_mage_dark_skin_tone": "1f9d9-1f3ff-200d-2640-fe0f", "male_mage_light_skin_tone": "1f9d9-1f3fb-200d-2642-fe0f", "male_mage_medium_light_skin_tone": "1f9d9-1f3fc-200d-2642-fe0f", "male_mage_medium_skin_tone": "1f9d9-1f3fd-200d-2642-fe0f", "male_mage_medium_dark_skin_tone": "1f9d9-1f3fe-200d-2642-fe0f", "male_mage_dark_skin_tone": "1f9d9-1f3ff-200d-2642-fe0f", "mage_light_skin_tone": "1f9d9-1f3fb", "mage_medium_light_skin_tone": "1f9d9-1f3fc", "mage_medium_skin_tone": "1f9d9-1f3fd", "mage_medium_dark_skin_tone": "1f9d9-1f3fe", "mage_dark_skin_tone": "1f9d9-1f3ff", "female_fairy_light_skin_tone": "1f9da-1f3fb-200d-2640-fe0f", "female_fairy_medium_light_skin_tone": "1f9da-1f3fc-200d-2640-fe0f", "female_fairy_medium_skin_tone": "1f9da-1f3fd-200d-2640-fe0f", "female_fairy_medium_dark_skin_tone": "1f9da-1f3fe-200d-2640-fe0f", "female_fairy_dark_skin_tone": "1f9da-1f3ff-200d-2640-fe0f", "male_fairy_light_skin_tone": "1f9da-1f3fb-200d-2642-fe0f", "male_fairy_medium_light_skin_tone": "1f9da-1f3fc-200d-2642-fe0f", "male_fairy_medium_skin_tone": "1f9da-1f3fd-200d-2642-fe0f", "male_fairy_medium_dark_skin_tone": "1f9da-1f3fe-200d-2642-fe0f", "male_fairy_dark_skin_tone": "1f9da-1f3ff-200d-2642-fe0f", "fairy_light_skin_tone": "1f9da-1f3fb", "fairy_medium_light_skin_tone": "1f9da-1f3fc", "fairy_medium_skin_tone": "1f9da-1f3fd", "fairy_medium_dark_skin_tone": "1f9da-1f3fe", "fairy_dark_skin_tone": "1f9da-1f3ff", "female_vampire_light_skin_tone": "1f9db-1f3fb-200d-2640-fe0f", "female_vampire_medium_light_skin_tone": "1f9db-1f3fc-200d-2640-fe0f", "female_vampire_medium_skin_tone": "1f9db-1f3fd-200d-2640-fe0f", "female_vampire_medium_dark_skin_tone": "1f9db-1f3fe-200d-2640-fe0f", "female_vampire_dark_skin_tone": "1f9db-1f3ff-200d-2640-fe0f", "male_vampire_light_skin_tone": "1f9db-1f3fb-200d-2642-fe0f", "male_vampire_medium_light_skin_tone": "1f9db-1f3fc-200d-2642-fe0f", "male_vampire_medium_skin_tone": "1f9db-1f3fd-200d-2642-fe0f", "male_vampire_medium_dark_skin_tone": "1f9db-1f3fe-200d-2642-fe0f", "male_vampire_dark_skin_tone": "1f9db-1f3ff-200d-2642-fe0f", "vampire_light_skin_tone": "1f9db-1f3fb", "vampire_medium_light_skin_tone": "1f9db-1f3fc", "vampire_medium_skin_tone": "1f9db-1f3fd", "vampire_medium_dark_skin_tone": "1f9db-1f3fe", 
"vampire_dark_skin_tone": "1f9db-1f3ff", "mermaid_light_skin_tone": "1f9dc-1f3fb-200d-2640-fe0f", "mermaid_medium_light_skin_tone": "1f9dc-1f3fc-200d-2640-fe0f", "mermaid_medium_skin_tone": "1f9dc-1f3fd-200d-2640-fe0f", "mermaid_medium_dark_skin_tone": "1f9dc-1f3fe-200d-2640-fe0f", "mermaid_dark_skin_tone": "1f9dc-1f3ff-200d-2640-fe0f", "merman_light_skin_tone": "1f9dc-1f3fb-200d-2642-fe0f", "merman_medium_light_skin_tone": "1f9dc-1f3fc-200d-2642-fe0f", "merman_medium_skin_tone": "1f9dc-1f3fd-200d-2642-fe0f", "merman_medium_dark_skin_tone": "1f9dc-1f3fe-200d-2642-fe0f", "merman_dark_skin_tone": "1f9dc-1f3ff-200d-2642-fe0f", "merperson_light_skin_tone": "1f9dc-1f3fb", "merperson_medium_light_skin_tone": "1f9dc-1f3fc", "merperson_medium_skin_tone": "1f9dc-1f3fd", "merperson_medium_dark_skin_tone": "1f9dc-1f3fe", "merperson_dark_skin_tone": "1f9dc-1f3ff", "female_elf_light_skin_tone": "1f9dd-1f3fb-200d-2640-fe0f", "female_elf_medium_light_skin_tone": "1f9dd-1f3fc-200d-2640-fe0f", "female_elf_medium_skin_tone": "1f9dd-1f3fd-200d-2640-fe0f", "female_elf_medium_dark_skin_tone": "1f9dd-1f3fe-200d-2640-fe0f", "female_elf_dark_skin_tone": "1f9dd-1f3ff-200d-2640-fe0f", "male_elf_light_skin_tone": "1f9dd-1f3fb-200d-2642-fe0f", "male_elf_medium_light_skin_tone": "1f9dd-1f3fc-200d-2642-fe0f", "male_elf_medium_skin_tone": "1f9dd-1f3fd-200d-2642-fe0f", "male_elf_medium_dark_skin_tone": "1f9dd-1f3fe-200d-2642-fe0f", "male_elf_dark_skin_tone": "1f9dd-1f3ff-200d-2642-fe0f", "elf_light_skin_tone": "1f9dd-1f3fb", "elf_medium_light_skin_tone": "1f9dd-1f3fc", "elf_medium_skin_tone": "1f9dd-1f3fd", "elf_medium_dark_skin_tone": "1f9dd-1f3fe", "elf_dark_skin_tone": "1f9dd-1f3ff", "point_up_light_skin_tone": "261d-1f3fb", "point_up_medium_light_skin_tone": "261d-1f3fc", "point_up_medium_skin_tone": "261d-1f3fd", "point_up_medium_dark_skin_tone": "261d-1f3fe", "point_up_dark_skin_tone": "261d-1f3ff", "woman-bouncing-ball_light_skin_tone": "26f9-1f3fb-200d-2640-fe0f", "basketball_woman_light_skin_tone": "26f9-1f3fb-200d-2640-fe0f", "woman-bouncing-ball_medium_light_skin_tone": "26f9-1f3fc-200d-2640-fe0f", "basketball_woman_medium_light_skin_tone": "26f9-1f3fc-200d-2640-fe0f", "woman-bouncing-ball_medium_skin_tone": "26f9-1f3fd-200d-2640-fe0f", "basketball_woman_medium_skin_tone": "26f9-1f3fd-200d-2640-fe0f", "woman-bouncing-ball_medium_dark_skin_tone": "26f9-1f3fe-200d-2640-fe0f", "basketball_woman_medium_dark_skin_tone": "26f9-1f3fe-200d-2640-fe0f", "woman-bouncing-ball_dark_skin_tone": "26f9-1f3ff-200d-2640-fe0f", "basketball_woman_dark_skin_tone": "26f9-1f3ff-200d-2640-fe0f", "man-bouncing-ball_light_skin_tone": "26f9-1f3fb-200d-2642-fe0f", "basketball_man_light_skin_tone": "26f9-1f3fb-200d-2642-fe0f", "man-bouncing-ball_medium_light_skin_tone": "26f9-1f3fc-200d-2642-fe0f", "basketball_man_medium_light_skin_tone": "26f9-1f3fc-200d-2642-fe0f", "man-bouncing-ball_medium_skin_tone": "26f9-1f3fd-200d-2642-fe0f", "basketball_man_medium_skin_tone": "26f9-1f3fd-200d-2642-fe0f", "man-bouncing-ball_medium_dark_skin_tone": "26f9-1f3fe-200d-2642-fe0f", "basketball_man_medium_dark_skin_tone": "26f9-1f3fe-200d-2642-fe0f", "man-bouncing-ball_dark_skin_tone": "26f9-1f3ff-200d-2642-fe0f", "basketball_man_dark_skin_tone": "26f9-1f3ff-200d-2642-fe0f", "person_with_ball_light_skin_tone": "26f9-1f3fb", "person_with_ball_medium_light_skin_tone": "26f9-1f3fc", "person_with_ball_medium_skin_tone": "26f9-1f3fd", "person_with_ball_medium_dark_skin_tone": "26f9-1f3fe", "person_with_ball_dark_skin_tone": "26f9-1f3ff", 
"fist_light_skin_tone": "270a-1f3fb", "fist_raised_light_skin_tone": "270a-1f3fb", "fist_medium_light_skin_tone": "270a-1f3fc", "fist_raised_medium_light_skin_tone": "270a-1f3fc", "fist_medium_skin_tone": "270a-1f3fd", "fist_raised_medium_skin_tone": "270a-1f3fd", "fist_medium_dark_skin_tone": "270a-1f3fe", "fist_raised_medium_dark_skin_tone": "270a-1f3fe", "fist_dark_skin_tone": "270a-1f3ff", "fist_raised_dark_skin_tone": "270a-1f3ff", "hand_light_skin_tone": "270b-1f3fb", "raised_hand_light_skin_tone": "270b-1f3fb", "hand_medium_light_skin_tone": "270b-1f3fc", "raised_hand_medium_light_skin_tone": "270b-1f3fc", "hand_medium_skin_tone": "270b-1f3fd", "raised_hand_medium_skin_tone": "270b-1f3fd", "hand_medium_dark_skin_tone": "270b-1f3fe", "raised_hand_medium_dark_skin_tone": "270b-1f3fe", "hand_dark_skin_tone": "270b-1f3ff", "raised_hand_dark_skin_tone": "270b-1f3ff", "v_light_skin_tone": "270c-1f3fb", "v_medium_light_skin_tone": "270c-1f3fc", "v_medium_skin_tone": "270c-1f3fd", "v_medium_dark_skin_tone": "270c-1f3fe", "v_dark_skin_tone": "270c-1f3ff", "writing_hand_light_skin_tone": "270d-1f3fb", "writing_hand_medium_light_skin_tone": "270d-1f3fc", "writing_hand_medium_skin_tone": "270d-1f3fd", "writing_hand_medium_dark_skin_tone": "270d-1f3fe", "writing_hand_dark_skin_tone": "270d-1f3ff", "mattermost": "mattermost"} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_search.go b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_search.go new file mode 100644 index 00000000..4d947a11 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji_search.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type EmojiSearch struct { + Term string `json:"term"` + PrefixOnly bool `json:"prefix_only"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go new file mode 100644 index 00000000..3fc7294f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go @@ -0,0 +1,134 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "reflect" + "strconv" +) + +type FeatureFlags struct { + // Exists only for unit and manual testing. + // When set to a value, will be returned by the ping endpoint. + TestFeature string + // Exists only for testing bool functionality. Boolean feature flags interpret "on" or "true" as true and + // all other values as false. + TestBoolFeature bool + + // Toggle on and off support for Collapsed Threads + CollapsedThreads bool + + // Enable the remote cluster service for shared channels. 
+ EnableRemoteClusterService bool + + // AppsEnabled toggles the Apps framework functionalities both in server and client side + AppsEnabled bool + + // AppBarEnabled toggles the App Bar component on client side + AppBarEnabled bool + + // Feature flags to control plugin versions + PluginPlaybooks string `plugin_id:"playbooks"` + PluginApps string `plugin_id:"com.mattermost.apps"` + PluginFocalboard string `plugin_id:"focalboard"` + + PermalinkPreviews bool + + // Enable Calls plugin support in the mobile app + CallsMobile bool + + // A dash separated list for feature flags to turn on for Boards + BoardsFeatureFlags string + + // Enable Create First Channel + GuidedChannelCreation bool + + // A/B test for whether radio buttons or toggle button is more effective in in-screen invite to team modal ("none", "toggle") + InviteToTeam string + + CustomGroups bool + + // Enable DataRetention for Boards + BoardsDataRetention bool + + NormalizeLdapDNs bool + + EnableInactivityCheckJob bool + + // Enable special onboarding flow for first admin + UseCaseOnboarding bool + + // Enable GraphQL feature + GraphQL bool + + InsightsEnabled bool + + CommandPalette bool +} + +func (f *FeatureFlags) SetDefaults() { + f.TestFeature = "off" + f.TestBoolFeature = false + f.CollapsedThreads = true + f.EnableRemoteClusterService = false + f.AppsEnabled = true + f.AppBarEnabled = false + f.PluginApps = "" + f.PluginFocalboard = "" + f.PermalinkPreviews = true + f.CallsMobile = false + f.BoardsFeatureFlags = "" + f.GuidedChannelCreation = false + f.InviteToTeam = "none" + f.CustomGroups = true + f.BoardsDataRetention = false + f.NormalizeLdapDNs = false + f.EnableInactivityCheckJob = true + f.UseCaseOnboarding = true + f.GraphQL = false + f.InsightsEnabled = false + f.CommandPalette = false +} +func (f *FeatureFlags) Plugins() map[string]string { + rFFVal := reflect.ValueOf(f).Elem() + rFFType := reflect.TypeOf(f).Elem() + + pluginVersions := make(map[string]string) + for i := 0; i < rFFVal.NumField(); i++ { + rFieldVal := rFFVal.Field(i) + rFieldType := rFFType.Field(i) + + pluginId, hasPluginId := rFieldType.Tag.Lookup("plugin_id") + if !hasPluginId { + continue + } + + pluginVersions[pluginId] = rFieldVal.String() + } + + return pluginVersions +} + +// ToMap returns the feature flags as a map[string]string +// Supports boolean and string feature flags. +func (f *FeatureFlags) ToMap() map[string]string { + refStructVal := reflect.ValueOf(*f) + refStructType := reflect.TypeOf(*f) + ret := make(map[string]string) + for i := 0; i < refStructVal.NumField(); i++ { + refFieldVal := refStructVal.Field(i) + if !refFieldVal.IsValid() { + continue + } + refFieldType := refStructType.Field(i) + switch refFieldType.Type.Kind() { + case reflect.Bool: + ret[refFieldType.Name] = strconv.FormatBool(refFieldVal.Bool()) + default: + ret[refFieldType.Name] = refFieldVal.String() + } + } + + return ret +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/file.go b/vendor/github.com/mattermost/mattermost-server/v6/model/file.go new file mode 100644 index 00000000..63d1ad33 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/file.go @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
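Since feature_flags.go above derives both Plugins() and ToMap() from struct reflection, here is a minimal standalone sketch of what those accessors yield after SetDefaults; it assumes only the vendored model package and is an illustration, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	ff := &model.FeatureFlags{}
	ff.SetDefaults()

	// Plugins() reflects over the struct and keeps only fields carrying a
	// `plugin_id` tag, keyed by the tag value; the defaults leave them empty.
	fmt.Println(ff.Plugins()) // map[com.mattermost.apps: focalboard: playbooks:]

	// ToMap() stringifies every flag: booleans via strconv.FormatBool,
	// everything else via the reflected string value.
	flags := ff.ToMap()
	fmt.Println(flags["CollapsedThreads"]) // true
	fmt.Println(flags["TestFeature"])      // off
}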
+ +package model + +const ( + MaxImageSize = int64(6048 * 4032) // 24 megapixels, roughly 36MB as a raw image +) + +type FileUploadResponse struct { + FileInfos []*FileInfo `json:"file_infos"` + ClientIds []string `json:"client_ids"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info.go similarity index 67% rename from vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/file_info.go index 8a3a5cc0..9519ef45 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info.go @@ -4,8 +4,6 @@ package model import ( - "bytes" - "encoding/json" "image" "image/gif" "io" @@ -16,8 +14,8 @@ import ( ) const ( - FILEINFO_SORT_BY_CREATED = "CreateAt" - FILEINFO_SORT_BY_SIZE = "Size" + FileinfoSortByCreated = "CreateAt" + FileinfoSortBySize = "Size" ) // GetFileInfosOptions contains options for getting FileInfos @@ -37,54 +35,26 @@ type GetFileInfosOptions struct { } type FileInfo struct { - Id string `json:"id"` - CreatorId string `json:"user_id"` - PostId string `json:"post_id,omitempty"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - Path string `json:"-"` // not sent back to the client - ThumbnailPath string `json:"-"` // not sent back to the client - PreviewPath string `json:"-"` // not sent back to the client - Name string `json:"name"` - Extension string `json:"extension"` - Size int64 `json:"size"` - MimeType string `json:"mime_type"` - Width int `json:"width,omitempty"` - Height int `json:"height,omitempty"` - HasPreviewImage bool `json:"has_preview_image,omitempty"` -} - -func (fi *FileInfo) ToJson() string { - b, _ := json.Marshal(fi) - return string(b) -} - -func FileInfoFromJson(data io.Reader) *FileInfo { - decoder := json.NewDecoder(data) - - var fi FileInfo - if err := decoder.Decode(&fi); err != nil { - return nil - } else { - return &fi - } -} - -func FileInfosToJson(infos []*FileInfo) string { - b, _ := json.Marshal(infos) - return string(b) -} - -func FileInfosFromJson(data io.Reader) []*FileInfo { - decoder := json.NewDecoder(data) - - var infos []*FileInfo - if err := decoder.Decode(&infos); err != nil { - return nil - } else { - return infos - } + Id string `json:"id"` + CreatorId string `json:"user_id"` + PostId string `json:"post_id,omitempty"` + ChannelId string `db:"-" json:"channel_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Path string `json:"-"` // not sent back to the client + ThumbnailPath string `json:"-"` // not sent back to the client + PreviewPath string `json:"-"` // not sent back to the client + Name string `json:"name"` + Extension string `json:"extension"` + Size int64 `json:"size"` + MimeType string `json:"mime_type"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` + HasPreviewImage bool `json:"has_preview_image,omitempty"` + MiniPreview *[]byte `json:"mini_preview"` // declared as *[]byte to avoid postgres/mysql differences in deserialization + Content string `json:"-"` + RemoteId *string `json:"remote_id"` } func (fi *FileInfo) PreSave() { @@ -99,6 +69,10 @@ func (fi *FileInfo) PreSave() { if fi.UpdateAt < fi.CreateAt { fi.UpdateAt = fi.CreateAt } + + if fi.RemoteId == nil { + fi.RemoteId = NewString("") + } } func (fi *FileInfo) 
IsValid() *AppError { @@ -110,7 +84,7 @@ func (fi *FileInfo) IsValid() *AppError { return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+fi.Id, http.StatusBadRequest) } - if len(fi.PostId) != 0 && !IsValidId(fi.PostId) { + if fi.PostId != "" && !IsValidId(fi.PostId) { return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+fi.Id, http.StatusBadRequest) } @@ -151,10 +125,10 @@ func NewInfo(name string) *FileInfo { return info } -func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) { +func GetInfoForBytes(name string, data io.ReadSeeker, size int) (*FileInfo, *AppError) { info := &FileInfo{ Name: name, - Size: int64(len(data)), + Size: int64(size), } var err *AppError @@ -170,19 +144,20 @@ func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) { if info.IsImage() { // Only set the width and height if it's actually an image that we can understand - if config, _, err := image.DecodeConfig(bytes.NewReader(data)); err == nil { + if config, _, err := image.DecodeConfig(data); err == nil { info.Width = config.Width info.Height = config.Height if info.MimeType == "image/gif" { // Just show the gif itself instead of a preview image for animated gifs - if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { + data.Seek(0, io.SeekStart) + gifConfig, err := gif.DecodeAll(data) + if err != nil { // Still return the rest of the info even though it doesn't appear to be an actual gif info.HasPreviewImage = true - return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest) - } else { - info.HasPreviewImage = len(gifConfig.Image) == 1 + return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, err.Error(), http.StatusBadRequest) } + info.HasPreviewImage = len(gifConfig.Image) == 1 } else { info.HasPreviewImage = true } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_list.go b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_list.go new file mode 100644 index 00000000..219b24f1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_list.go @@ -0,0 +1,111 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
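GetInfoForBytes now takes an io.ReadSeeker and an explicit size instead of a []byte, and the animated-gif branch seeks back to the start before decoding. A hedged sketch of the call-site migration for code that still holds raw bytes:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// In a server handler the bytes would come from an upload; reading a
	// local file here just keeps the sketch self-contained.
	data, err := os.ReadFile("avatar.gif")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// v5 signature: GetInfoForBytes(name string, data []byte)
	// v6 signature: a seekable reader plus the byte count.
	info, appErr := model.GetInfoForBytes("avatar.gif", bytes.NewReader(data), len(data))
	if appErr != nil {
		fmt.Fprintln(os.Stderr, appErr.Error())
		os.Exit(1)
	}
	fmt.Printf("%s %dx%d preview=%v\n", info.MimeType, info.Width, info.Height, info.HasPreviewImage)
}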
+ +package model + +import ( + "sort" +) + +type FileInfoList struct { + Order []string `json:"order"` + FileInfos map[string]*FileInfo `json:"file_infos"` + NextFileInfoId string `json:"next_file_info_id"` + PrevFileInfoId string `json:"prev_file_info_id"` +} + +func NewFileInfoList() *FileInfoList { + return &FileInfoList{ + Order: make([]string, 0), + FileInfos: make(map[string]*FileInfo), + NextFileInfoId: "", + PrevFileInfoId: "", + } +} + +func (o *FileInfoList) ToSlice() []*FileInfo { + var fileInfos []*FileInfo + for _, id := range o.Order { + fileInfos = append(fileInfos, o.FileInfos[id]) + } + return fileInfos +} + +func (o *FileInfoList) MakeNonNil() { + if o.Order == nil { + o.Order = make([]string, 0) + } + + if o.FileInfos == nil { + o.FileInfos = make(map[string]*FileInfo) + } +} + +func (o *FileInfoList) AddOrder(id string) { + if o.Order == nil { + o.Order = make([]string, 0, 128) + } + + o.Order = append(o.Order, id) +} + +func (o *FileInfoList) AddFileInfo(fileInfo *FileInfo) { + if o.FileInfos == nil { + o.FileInfos = make(map[string]*FileInfo) + } + + o.FileInfos[fileInfo.Id] = fileInfo +} + +func (o *FileInfoList) UniqueOrder() { + keys := make(map[string]bool) + order := []string{} + for _, fileInfoId := range o.Order { + if _, value := keys[fileInfoId]; !value { + keys[fileInfoId] = true + order = append(order, fileInfoId) + } + } + + o.Order = order +} + +func (o *FileInfoList) Extend(other *FileInfoList) { + for fileInfoId := range other.FileInfos { + o.AddFileInfo(other.FileInfos[fileInfoId]) + } + + for _, fileInfoId := range other.Order { + o.AddOrder(fileInfoId) + } + + o.UniqueOrder() +} + +func (o *FileInfoList) SortByCreateAt() { + sort.Slice(o.Order, func(i, j int) bool { + return o.FileInfos[o.Order[i]].CreateAt > o.FileInfos[o.Order[j]].CreateAt + }) +} + +func (o *FileInfoList) Etag() string { + id := "0" + var t int64 = 0 + + for _, v := range o.FileInfos { + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } else if v.UpdateAt == t && v.Id > id { + t = v.UpdateAt + id = v.Id + } + } + + orderId := "" + if len(o.Order) > 0 { + orderId = o.Order[0] + } + + return Etag(orderId, id, t) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_search_results.go b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_search_results.go new file mode 100644 index 00000000..fddbffd4 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/file_info_search_results.go @@ -0,0 +1,18 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type FileInfoSearchMatches map[string][]string + +type FileInfoSearchResults struct { + *FileInfoList + Matches FileInfoSearchMatches `json:"matches"` +} + +func MakeFileInfoSearchResults(fileInfos *FileInfoList, matches FileInfoSearchMatches) *FileInfoSearchResults { + return &FileInfoSearchResults{ + fileInfos, + matches, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go b/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go new file mode 100644 index 00000000..75cc0a5f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go @@ -0,0 +1,26 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
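file_info_list.go follows the same Order/map layout the server uses for post lists. A small sketch of assembling, sorting, and fingerprinting one (the IDs are shortened placeholders; real ones come from model.NewId):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	list := model.NewFileInfoList()

	older := &model.FileInfo{Id: "a1", CreateAt: 100, UpdateAt: 100}
	newer := &model.FileInfo{Id: "b2", CreateAt: 200, UpdateAt: 250}

	for _, fi := range []*model.FileInfo{older, newer} {
		list.AddFileInfo(fi) // stores the object by ID
		list.AddOrder(fi.Id) // records the display order
	}

	list.SortByCreateAt()   // newest first
	fmt.Println(list.Order) // [b2 a1]

	// Etag combines the first ordered ID with the most recently updated
	// entry, so any edit or reorder invalidates client caches.
	fmt.Println(list.Etag())
}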
+ +package model + +import ( + "net/http" +) + +type GithubReleaseInfo struct { + Id int `json:"id"` + TagName string `json:"tag_name"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + PublishedAt string `json:"published_at"` + Body string `json:"body"` + Url string `json:"html_url"` +} + +func (g *GithubReleaseInfo) IsValid() *AppError { + if g.Id == 0 { + return NewAppError("GithubReleaseInfo.IsValid", "model.github_release_info.is_valid.id.app_error", nil, "", http.StatusInternalServerError) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go b/vendor/github.com/mattermost/mattermost-server/v6/model/gitlab.go similarity index 78% rename from vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/gitlab.go index 0b069cd6..c6233f13 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/gitlab.go @@ -4,5 +4,5 @@ package model const ( - USER_AUTH_SERVICE_GITLAB = "gitlab" + UserAuthServiceGitlab = "gitlab" ) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go b/vendor/github.com/mattermost/mattermost-server/v6/model/group.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/v5/model/group.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/group.go index 2eda1184..428c431a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/group.go @@ -4,14 +4,13 @@ package model import ( - "encoding/json" - "io" "net/http" "regexp" ) const ( - GroupSourceLdap GroupSource = "ldap" + GroupSourceLdap GroupSource = "ldap" + GroupSourceCustom GroupSource = "custom" GroupNameMaxLength = 64 GroupSourceMaxLength = 64 @@ -24,6 +23,7 @@ type GroupSource string var allGroupSources = []GroupSource{ GroupSourceLdap, + GroupSourceCustom, } var groupSourcesRequiringRemoteID = []GroupSource{ @@ -36,7 +36,7 @@ type Group struct { DisplayName string `json:"display_name"` Description string `json:"description"` Source GroupSource `json:"source"` - RemoteId string `json:"remote_id"` + RemoteId *string `json:"remote_id"` CreateAt int64 `json:"create_at"` UpdateAt int64 `json:"update_at"` DeleteAt int64 `json:"delete_at"` @@ -45,6 +45,11 @@ type Group struct { AllowReference bool `json:"allow_reference"` } +type GroupWithUserIds struct { + Group + UserIds []string `json:"user_ids"` +} + type GroupWithSchemeAdmin struct { Group SchemeAdmin *bool `db:"SyncableSchemeAdmin" json:"scheme_admin,omitempty"` @@ -65,6 +70,8 @@ type GroupPatch struct { DisplayName *string `json:"display_name"` Description *string `json:"description"` AllowReference *bool `json:"allow_reference"` + // For security reasons (including preventing unintended LDAP group synchronization) do not allow a Group's RemoteId or Source field to be + // included in patches. } type LdapGroupSearchOpts struct { @@ -81,12 +88,21 @@ type GroupSearchOpts struct { FilterAllowReference bool PageOpts *PageOpts Since int64 + Source GroupSource // FilterParentTeamPermitted filters the groups to the intersect of the // set associated to the parent team and those returned by the query. // If the parent team is not group-constrained or if NotAssociatedToChannel // is not set then this option is ignored.
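Because RemoteId is now *string, LDAP callers build groups with the model package's existing NewString pointer helper, while the custom source simply leaves the pointer nil; the nil-safe GetName/GetRemoteId accessors added further down in this file absorb both cases. A brief sketch:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	ldapGroup := &model.Group{
		Name:     model.NewString("engineering"),
		Source:   model.GroupSourceLdap,
		RemoteId: model.NewString("cn=engineering,ou=groups,dc=example,dc=com"), // sample DN
	}

	// Custom groups have no remote system, so RemoteId stays nil.
	customGroup := &model.Group{
		Name:   model.NewString("book-club"),
		Source: model.GroupSourceCustom,
	}

	fmt.Println(ldapGroup.GetRemoteId())         // cn=engineering,ou=groups,dc=example,dc=com
	fmt.Println(customGroup.GetRemoteId() == "") // true: the getter hides the nil pointer
}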
FilterParentTeamPermitted bool + + // FilterHasMember filters the groups to the intersect of the + // set returned by the query and those that have the given user as a member. + FilterHasMember string +} + +type GetGroupOpts struct { + IncludeMemberCount bool } type PageOpts struct { @@ -99,6 +115,10 @@ type GroupStats struct { TotalMemberCount int64 `json:"total_member_count"` } +type GroupModifyMembers struct { + UserIds []string `json:"user_ids"` +} + func (group *Group) Patch(patch *GroupPatch) { if patch.Name != nil { group.Name = patch.Name @@ -139,7 +159,7 @@ func (group *Group) IsValidForCreate() *AppError { return NewAppError("Group.IsValidForCreate", "model.group.source.app_error", nil, "", http.StatusBadRequest) } - if len(group.RemoteId) > GroupRemoteIDMaxLength || (len(group.RemoteId) == 0 && group.requiresRemoteId()) { + if (group.GetRemoteId() == "" && group.requiresRemoteId()) || len(group.GetRemoteId()) > GroupRemoteIDMaxLength { return NewAppError("Group.IsValidForCreate", "model.group.remote_id.app_error", nil, "", http.StatusBadRequest) } @@ -157,7 +177,7 @@ func (group *Group) requiresRemoteId() bool { func (group *Group) IsValidForUpdate() *AppError { if !IsValidId(group.Id) { - return NewAppError("Group.IsValidForUpdate", "model.group.id.app_error", nil, "", http.StatusBadRequest) + return NewAppError("Group.IsValidForUpdate", "app.group.id.app_error", nil, "", http.StatusBadRequest) } if group.CreateAt == 0 { return NewAppError("Group.IsValidForUpdate", "model.group.create_at.app_error", nil, "", http.StatusBadRequest) @@ -171,11 +191,6 @@ func (group *Group) IsValidForUpdate() *AppError { return nil } -func (group *Group) ToJson() string { - b, _ := json.Marshal(group) - return string(b) -} - var validGroupnameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`) func (group *Group) IsValidName() *AppError { @@ -196,26 +211,21 @@ func (group *Group) IsValidName() *AppError { return nil } -func GroupFromJson(data io.Reader) *Group { - var group *Group - json.NewDecoder(data).Decode(&group) - return group -} - -func GroupsFromJson(data io.Reader) []*Group { - var groups []*Group - json.NewDecoder(data).Decode(&groups) - return groups +func (group *Group) GetName() string { + if group.Name == nil { + return "" + } + return *group.Name } -func GroupPatchFromJson(data io.Reader) *GroupPatch { - var groupPatch *GroupPatch - json.NewDecoder(data).Decode(&groupPatch) - return groupPatch +func (group *Group) GetRemoteId() string { + if group.RemoteId == nil { + return "" + } + return *group.RemoteId } -func GroupStatsFromJson(data io.Reader) *GroupStats { - var groupStats *GroupStats - json.NewDecoder(data).Decode(&groupStats) - return groupStats +type GroupsWithCount struct { + Groups []*Group `json:"groups"` + TotalCount int64 `json:"total_count"` } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/group_member.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/group_member.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/group_member.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go b/vendor/github.com/mattermost/mattermost-server/v6/model/group_syncable.go similarity index 80% rename from vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/group_syncable.go index 6a4d4023..1f1bf60f 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/group_syncable.go @@ -6,8 +6,6 @@ package model import ( "encoding/json" "fmt" - "io" - "io/ioutil" "net/http" ) @@ -60,14 +58,14 @@ func (syncable *GroupSyncable) UnmarshalJSON(b []byte) error { if err != nil { return err } + var channelId string + var teamId string for key, value := range kvp { switch key { case "team_id": - syncable.SyncableId = value.(string) - syncable.Type = GroupSyncableTypeTeam + teamId = value.(string) case "channel_id": - syncable.SyncableId = value.(string) - syncable.Type = GroupSyncableTypeChannel + channelId = value.(string) case "group_id": syncable.GroupId = value.(string) case "auto_add": @@ -75,30 +73,40 @@ func (syncable *GroupSyncable) UnmarshalJSON(b []byte) error { default: } } + if channelId != "" { + syncable.TeamID = teamId + syncable.SyncableId = channelId + syncable.Type = GroupSyncableTypeChannel + } else { + syncable.SyncableId = teamId + syncable.Type = GroupSyncableTypeTeam + } return nil } func (syncable *GroupSyncable) MarshalJSON() ([]byte, error) { type Alias GroupSyncable - switch syncable.Type { case GroupSyncableTypeTeam: return json.Marshal(&struct { - TeamID string `json:"team_id"` - TeamDisplayName string `json:"team_display_name,omitempty"` - TeamType string `json:"team_type,omitempty"` + TeamID string `json:"team_id"` + TeamDisplayName string `json:"team_display_name,omitempty"` + TeamType string `json:"team_type,omitempty"` + Type GroupSyncableType `json:"type,omitempty"` *Alias }{ TeamDisplayName: syncable.TeamDisplayName, TeamType: syncable.TeamType, TeamID: syncable.SyncableId, + Type: syncable.Type, Alias: (*Alias)(syncable), }) case GroupSyncableTypeChannel: return json.Marshal(&struct { - ChannelID string `json:"channel_id"` - ChannelDisplayName string `json:"channel_display_name,omitempty"` - ChannelType string `json:"channel_type,omitempty"` + ChannelID string `json:"channel_id"` + ChannelDisplayName string `json:"channel_display_name,omitempty"` + ChannelType string `json:"channel_type,omitempty"` + Type GroupSyncableType `json:"type,omitempty"` TeamID string `json:"team_id,omitempty"` TeamDisplayName string `json:"team_display_name,omitempty"` @@ -109,6 +117,7 @@ func (syncable *GroupSyncable) MarshalJSON() ([]byte, error) { ChannelID: syncable.SyncableId, ChannelDisplayName: syncable.ChannelDisplayName, ChannelType: syncable.ChannelType, + Type: syncable.Type, TeamID: syncable.TeamID, TeamDisplayName: syncable.TeamDisplayName, @@ -147,20 +156,6 @@ type UserChannelIDPair struct { ChannelID string } -func GroupSyncableFromJson(data io.Reader) *GroupSyncable { - groupSyncable := &GroupSyncable{} - bodyBytes, _ := ioutil.ReadAll(data) - json.Unmarshal(bodyBytes, groupSyncable) - return groupSyncable -} - -func GroupSyncablesFromJson(data io.Reader) []*GroupSyncable { - groupSyncables := []*GroupSyncable{} - bodyBytes, _ := ioutil.ReadAll(data) - json.Unmarshal(bodyBytes, &groupSyncables) - return groupSyncables -} - func NewGroupTeam(groupID, teamID string, autoAdd bool) *GroupSyncable { return &GroupSyncable{ GroupId: groupID, diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go b/vendor/github.com/mattermost/mattermost-server/v6/model/guest_invite.go similarity index 73% rename from vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/guest_invite.go index 
3cdd4893..5103ccd1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/guest_invite.go @@ -4,8 +4,6 @@ package model import ( - "encoding/json" - "io" "net/http" ) @@ -23,7 +21,7 @@ func (i *GuestsInvite) IsValid() *AppError { } for _, email := range i.Emails { - if len(email) > USER_EMAIL_MAX_LENGTH || len(email) == 0 || !IsValidEmail(email) { + if len(email) > UserEmailMaxLength || email == "" || !IsValidEmail(email) { return NewAppError("GuestsInvite.IsValid", "model.guest.is_valid.email.app_error", nil, "email="+email, http.StatusBadRequest) } } @@ -39,15 +37,3 @@ func (i *GuestsInvite) IsValid() *AppError { } return nil } - -// GuestsInviteFromJson will decode the input and return a GuestsInvite -func GuestsInviteFromJson(data io.Reader) *GuestsInvite { - var i *GuestsInvite - json.NewDecoder(data).Decode(&i) - return i -} - -func (i *GuestsInvite) ToJson() string { - b, _ := json.Marshal(i) - return string(b) -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go b/vendor/github.com/mattermost/mattermost-server/v6/model/incoming_webhook.go similarity index 87% rename from vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/incoming_webhook.go index 78f1e4e8..ce7828e4 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/incoming_webhook.go @@ -12,7 +12,7 @@ import ( ) const ( - DEFAULT_WEBHOOK_USERNAME = "webhook" + DefaultWebhookUsername = "webhook" ) type IncomingWebhook struct { @@ -41,30 +41,7 @@ type IncomingWebhookRequest struct { IconEmoji string `json:"icon_emoji"` } -func (o *IncomingWebhook) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func IncomingWebhookFromJson(data io.Reader) *IncomingWebhook { - var o *IncomingWebhook - json.NewDecoder(data).Decode(&o) - return o -} - -func IncomingWebhookListToJson(l []*IncomingWebhook) string { - b, _ := json.Marshal(l) - return string(b) -} - -func IncomingWebhookListFromJson(data io.Reader) []*IncomingWebhook { - var o []*IncomingWebhook - json.NewDecoder(data).Decode(&o) - return o -} - func (o *IncomingWebhook) IsValid() *AppError { - if !IsValidId(o.Id) { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "", http.StatusBadRequest) @@ -182,12 +159,11 @@ func decodeIncomingWebhookRequest(by []byte) (*IncomingWebhookRequest, error) { err := decoder.Decode(&o) if err == nil { return &o, nil - } else { - return nil, err } + return nil, err } -func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *AppError) { +func IncomingWebhookRequestFromJSON(data io.Reader) (*IncomingWebhookRequest, *AppError) { buf := new(bytes.Buffer) buf.ReadFrom(data) by := buf.Bytes() @@ -198,7 +174,7 @@ func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *A if err != nil { o, err = decodeIncomingWebhookRequest(escapeControlCharsFromPayload(by)) if err != nil { - return nil, NewAppError("IncomingWebhookRequestFromJson", "model.incoming_hook.parse_data.app_error", nil, err.Error(), http.StatusBadRequest) + return nil, NewAppError("IncomingWebhookRequestFromJSON", "model.incoming_hook.parse_data.app_error", nil, err.Error(), http.StatusBadRequest) } } @@ -206,12 +182,3 @@ func IncomingWebhookRequestFromJson(data io.Reader) 
(*IncomingWebhookRequest, *A return o, nil } - -func (o *IncomingWebhookRequest) ToJson() string { - b, err := json.Marshal(o) - if err != nil { - return "" - } else { - return string(b) - } -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go b/vendor/github.com/mattermost/mattermost-server/v6/model/initial_load.go similarity index 67% rename from vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/initial_load.go index 9368f371..5ecddda2 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/initial_load.go @@ -3,11 +3,6 @@ package model -import ( - "encoding/json" - "io" -) - type InitialLoad struct { User *User `json:"user"` TeamMembers []*TeamMember `json:"team_members"` @@ -17,14 +12,3 @@ type InitialLoad struct { LicenseCfg map[string]string `json:"license_cfg"` NoAccounts bool `json:"no_accounts"` } - -func (me *InitialLoad) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func InitialLoadFromJson(data io.Reader) *InitialLoad { - var o *InitialLoad - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/insights.go b/vendor/github.com/mattermost/mattermost-server/v6/model/insights.go new file mode 100644 index 00000000..e274bb83 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/insights.go @@ -0,0 +1,76 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "net/http" + "time" +) + +const ( + TimeRangeToday string = "today" + TimeRange7Day string = "7_day" + TimeRange28Day string = "28_day" +) + +type InsightsOpts struct { + StartUnixMilli int64 + Page int + PerPage int +} + +type InsightsListData struct { + HasNext bool `json:"has_next"` +} + +type InsightsData struct { + Rank int `json:"rank"` +} + +type TopReactionList struct { + InsightsListData + Items []*TopReaction `json:"items"` +} + +type TopReaction struct { + InsightsData + EmojiName string `json:"emoji_name"` + Count int64 `json:"count"` +} + +// GetStartUnixMilliForTimeRange gets the unix start time in milliseconds from the given time range. +// Time range can be one of: "today", "7_day", or "28_day". +func GetStartUnixMilliForTimeRange(timeRange string) (int64, *AppError) { + now := time.Now() + _, offset := now.Zone() + switch timeRange { + case TimeRangeToday: + return GetStartOfDayMillis(now, offset), nil + case TimeRange7Day: + return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-168)), offset), nil + case TimeRange28Day: + return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-672)), offset), nil + } + + return GetStartOfDayMillis(now, offset), NewAppError("Insights.IsValidRequest", "model.insights.time_range.app_error", nil, "", http.StatusBadRequest) +} + +// GetTopReactionListWithRankAndPagination adds a rank to each item in the given list of TopReaction and checks if there is +// another page that can be fetched based on the given limit and offset. The given list of TopReaction is assumed to be +// sorted by Count. Returns a TopReactionList.
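GetStartUnixMilliForTimeRange above only recognizes the three TimeRange* constants and, for anything else, still returns the start of today alongside an AppError; a quick sketch of both paths:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// Known range: local start-of-day 28 days back, as Unix milliseconds.
	start, appErr := model.GetStartUnixMilliForTimeRange(model.TimeRange28Day)
	fmt.Println(start, appErr == nil) // <millis> true

	// Unknown range: a usable "today" fallback is returned with the error.
	fallback, appErr := model.GetStartUnixMilliForTimeRange("90_day")
	fmt.Println(fallback, appErr != nil) // <millis> true
}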
+func GetTopReactionListWithRankAndPagination(reactions []*TopReaction, limit int, offset int) *TopReactionList { + // Add pagination support + var hasNext bool + if (limit != 0) && (len(reactions) == limit+1) { + hasNext = true + reactions = reactions[:len(reactions)-1] + } + + // Assign rank to each reaction + for i, reaction := range reactions { + reaction.Rank = offset + i + 1 + } + + return &TopReactionList{InsightsListData: InsightsListData{HasNext: hasNext}, Items: reactions} +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go b/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go similarity index 88% rename from vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go index 3f362d64..4c645d02 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go @@ -16,14 +16,15 @@ import ( "io" "math/big" "net/http" + "reflect" "strconv" "strings" ) const ( - POST_ACTION_TYPE_BUTTON = "button" - POST_ACTION_TYPE_SELECT = "select" - INTERACTIVE_DIALOG_TRIGGER_TIMEOUT_MILLISECONDS = 3000 + PostActionTypeButton = "button" + PostActionTypeSelect = "select" + InteractiveDialogTriggerTimeoutMilliseconds = 3000 ) var PostActionRetainPropKeys = []string{"from_webhook", "override_username", "override_icon_url"} @@ -114,6 +115,14 @@ func (p *PostAction) Equals(input *PostAction) bool { } // Compare PostActionIntegration + + // If input is nil, then return true if original is also nil. + // Else return false. + if input.Integration == nil { + return p.Integration == nil + } + + // Both are unequal and not nil. 
if p.Integration.URL != input.Integration.URL { return false } @@ -124,13 +133,19 @@ func (p *PostAction) Equals(input *PostAction) bool { for key, value := range p.Integration.Context { inputValue, ok := input.Integration.Context[key] - if !ok { return false } - if value != inputValue { - return false + switch inputValue.(type) { + case string, bool, int, float64: + if value != inputValue { + return false + } + default: + if !reflect.DeepEqual(value, inputValue) { + return false + } } } @@ -282,8 +297,8 @@ func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, st timestamp, _ := strconv.ParseInt(timestampStr, 10, 64) now := GetMillis() - if now-timestamp > INTERACTIVE_DIALOG_TRIGGER_TIMEOUT_MILLISECONDS { - return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.expired", map[string]interface{}{"Seconds": INTERACTIVE_DIALOG_TRIGGER_TIMEOUT_MILLISECONDS / 1000}, "", http.StatusBadRequest) + if now-timestamp > InteractiveDialogTriggerTimeoutMilliseconds { + return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.expired", map[string]interface{}{"Seconds": InteractiveDialogTriggerTimeoutMilliseconds / 1000}, "", http.StatusBadRequest) } signature, err := base64.StdEncoding.DecodeString(split[3]) @@ -316,62 +331,6 @@ func (r *OpenDialogRequest) DecodeAndVerifyTriggerId(s *ecdsa.PrivateKey) (strin return DecodeAndVerifyTriggerId(r.TriggerId, s) } -func (r *PostActionIntegrationRequest) ToJson() []byte { - b, _ := json.Marshal(r) - return b -} - -func PostActionIntegrationRequestFromJson(data io.Reader) *PostActionIntegrationRequest { - var o *PostActionIntegrationRequest - err := json.NewDecoder(data).Decode(&o) - if err != nil { - return nil - } - return o -} - -func (r *PostActionIntegrationResponse) ToJson() []byte { - b, _ := json.Marshal(r) - return b -} - -func PostActionIntegrationResponseFromJson(data io.Reader) *PostActionIntegrationResponse { - var o *PostActionIntegrationResponse - err := json.NewDecoder(data).Decode(&o) - if err != nil { - return nil - } - return o -} - -func SubmitDialogRequestFromJson(data io.Reader) *SubmitDialogRequest { - var o *SubmitDialogRequest - err := json.NewDecoder(data).Decode(&o) - if err != nil { - return nil - } - return o -} - -func (r *SubmitDialogRequest) ToJson() []byte { - b, _ := json.Marshal(r) - return b -} - -func SubmitDialogResponseFromJson(data io.Reader) *SubmitDialogResponse { - var o *SubmitDialogResponse - err := json.NewDecoder(data).Decode(&o) - if err != nil { - return nil - } - return o -} - -func (r *SubmitDialogResponse) ToJson() []byte { - b, _ := json.Marshal(r) - return b -} - func (o *Post) StripActionIntegrations() { attachments := o.Attachments() if o.GetProp("attachments") != nil { @@ -387,7 +346,7 @@ func (o *Post) StripActionIntegrations() { func (o *Post) GetAction(id string) *PostAction { for _, attachment := range o.Attachments() { for _, action := range attachment.Actions { - if action.Id == id { + if action != nil && action.Id == id { return action } } @@ -402,7 +361,7 @@ func (o *Post) GenerateActionIds() { if attachments, ok := o.GetProp("attachments").([]*SlackAttachment); ok { for _, attachment := range attachments { for _, action := range attachment.Actions { - if action.Id == "" { + if action != nil && action.Id == "" { action.Id = NewId() } } @@ -517,9 +476,3 @@ func DecryptPostActionCookie(encoded string, secret []byte) (string, error) { return string(plain), nil } - -func DoPostActionRequestFromJson(data 
io.Reader) *DoPostActionRequest { - var o *DoPostActionRequest - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/integrity.go b/vendor/github.com/mattermost/mattermost-server/v6/model/integrity.go new file mode 100644 index 00000000..744ad07c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/integrity.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "errors" +) + +type OrphanedRecord struct { + ParentId *string `json:"parent_id"` + ChildId *string `json:"child_id"` +} + +type RelationalIntegrityCheckData struct { + ParentName string `json:"parent_name"` + ChildName string `json:"child_name"` + ParentIdAttr string `json:"parent_id_attr"` + ChildIdAttr string `json:"child_id_attr"` + Records []OrphanedRecord `json:"records"` +} + +type IntegrityCheckResult struct { + Data interface{} `json:"data"` + Err error `json:"err"` +} + +func (r *IntegrityCheckResult) UnmarshalJSON(b []byte) error { + var data map[string]interface{} + if err := json.Unmarshal(b, &data); err != nil { + return err + } + if d, ok := data["data"]; ok && d != nil { + var rdata RelationalIntegrityCheckData + m := d.(map[string]interface{}) + rdata.ParentName = m["parent_name"].(string) + rdata.ChildName = m["child_name"].(string) + rdata.ParentIdAttr = m["parent_id_attr"].(string) + rdata.ChildIdAttr = m["child_id_attr"].(string) + for _, recData := range m["records"].([]interface{}) { + var record OrphanedRecord + m := recData.(map[string]interface{}) + if val := m["parent_id"]; val != nil { + record.ParentId = NewString(val.(string)) + } + if val := m["child_id"]; val != nil { + record.ChildId = NewString(val.(string)) + } + rdata.Records = append(rdata.Records, record) + } + r.Data = rdata + } + if err, ok := data["err"]; ok && err != nil { + r.Err = errors.New(data["err"].(string)) + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/job.go b/vendor/github.com/mattermost/mattermost-server/v6/model/job.go new file mode 100644 index 00000000..8b6272e8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/job.go @@ -0,0 +1,106 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
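integrity.go needs a hand-written UnmarshalJSON because Data is an interface{} and Err a plain error; a sketch round-tripping one relational-check payload (the table and ID values are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	payload := []byte(`{
		"data": {
			"parent_name": "Channels",
			"child_name": "Posts",
			"parent_id_attr": "ChannelId",
			"child_id_attr": "Id",
			"records": [{"parent_id": "missing-channel", "child_id": "orphan-post"}]
		},
		"err": null
	}`)

	var result model.IntegrityCheckResult
	if err := json.Unmarshal(payload, &result); err != nil {
		panic(err)
	}

	// The custom decoder rebuilt the concrete type behind the interface{} field.
	data := result.Data.(model.RelationalIntegrityCheckData)
	fmt.Println(data.ParentName, data.ChildName, len(data.Records)) // Channels Posts 1
	fmt.Println(*data.Records[0].ParentId)                          // missing-channel
}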
+ +package model + +import ( + "net/http" + "time" +) + +const ( + JobTypeDataRetention = "data_retention" + JobTypeMessageExport = "message_export" + JobTypeElasticsearchPostIndexing = "elasticsearch_post_indexing" + JobTypeElasticsearchPostAggregation = "elasticsearch_post_aggregation" + JobTypeBlevePostIndexing = "bleve_post_indexing" + JobTypeLdapSync = "ldap_sync" + JobTypeMigrations = "migrations" + JobTypePlugins = "plugins" + JobTypeExpiryNotify = "expiry_notify" + JobTypeProductNotices = "product_notices" + JobTypeActiveUsers = "active_users" + JobTypeImportProcess = "import_process" + JobTypeImportDelete = "import_delete" + JobTypeExportProcess = "export_process" + JobTypeExportDelete = "export_delete" + JobTypeCloud = "cloud" + JobTypeResendInvitationEmail = "resend_invitation_email" + JobTypeExtractContent = "extract_content" + + JobStatusPending = "pending" + JobStatusInProgress = "in_progress" + JobStatusSuccess = "success" + JobStatusError = "error" + JobStatusCancelRequested = "cancel_requested" + JobStatusCanceled = "canceled" + JobStatusWarning = "warning" +) + +var AllJobTypes = [...]string{ + JobTypeDataRetention, + JobTypeMessageExport, + JobTypeElasticsearchPostIndexing, + JobTypeElasticsearchPostAggregation, + JobTypeBlevePostIndexing, + JobTypeLdapSync, + JobTypeMigrations, + JobTypePlugins, + JobTypeExpiryNotify, + JobTypeProductNotices, + JobTypeActiveUsers, + JobTypeImportProcess, + JobTypeImportDelete, + JobTypeExportProcess, + JobTypeExportDelete, + JobTypeCloud, + JobTypeExtractContent, +} + +type Job struct { + Id string `json:"id"` + Type string `json:"type"` + Priority int64 `json:"priority"` + CreateAt int64 `json:"create_at"` + StartAt int64 `json:"start_at"` + LastActivityAt int64 `json:"last_activity_at"` + Status string `json:"status"` + Progress int64 `json:"progress"` + Data StringMap `json:"data"` +} + +func (j *Job) IsValid() *AppError { + if !IsValidId(j.Id) { + return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + if j.CreateAt == 0 { + return NewAppError("Job.IsValid", "model.job.is_valid.create_at.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + switch j.Status { + case JobStatusPending: + case JobStatusInProgress: + case JobStatusSuccess: + case JobStatusError: + case JobStatusCancelRequested: + case JobStatusCanceled: + default: + return NewAppError("Job.IsValid", "model.job.is_valid.status.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + return nil +} + +type Worker interface { + Run() + Stop() + JobChannel() chan<- Job + IsEnabled(cfg *Config) bool +} + +type Scheduler interface { + Enabled(cfg *Config) bool + NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time + ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/v6/model/ldap.go new file mode 100644 index 00000000..314e7222 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/ldap.go @@ -0,0 +1,10 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
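Job.IsValid requires a server-generated ID, a nonzero CreateAt, and one of the six statuses listed in its switch; notably JobStatusWarning is declared but rejected there. A short sketch (NewId and GetMillis are existing model helpers):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	job := &model.Job{
		Id:       model.NewId(),
		Type:     model.JobTypeLdapSync,
		CreateAt: model.GetMillis(),
		Status:   model.JobStatusPending,
		Data:     model.StringMap{"scope": "nightly"},
	}
	fmt.Println(job.IsValid()) // <nil>

	// A declared constant, but not an accepted state in IsValid's switch.
	job.Status = model.JobStatusWarning
	fmt.Println(job.IsValid() != nil) // true
}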
+ +package model + +const ( + UserAuthServiceLdap = "ldap" + LdapPublicCertificateName = "ldap-public.crt" + LdapPrivateKeyName = "ldap-private.key" +) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go b/vendor/github.com/mattermost/mattermost-server/v6/model/license.go similarity index 68% rename from vendor/github.com/mattermost/mattermost-server/v5/model/license.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/license.go index 365222b7..c647faca 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/license.go @@ -5,15 +5,38 @@ package model import ( "encoding/json" - "io" + "fmt" "net/http" + "time" ) const ( - EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error" - INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error" - LICENSE_GRACE_PERIOD = 1000 * 60 * 60 * 24 * 10 //10 days - LICENSE_RENEWAL_LINK = "https://mattermost.com/renew/" + DayInSeconds = 24 * 60 * 60 + DayInMilliseconds = DayInSeconds * 1000 + + ExpiredLicenseError = "api.license.add_license.expired.app_error" + InvalidLicenseError = "api.license.add_license.invalid.app_error" + LicenseGracePeriod = DayInMilliseconds * 10 //10 days + LicenseRenewalLink = "https://mattermost.com/renew/" + + LicenseShortSkuE10 = "E10" + LicenseShortSkuE20 = "E20" + LicenseShortSkuProfessional = "professional" + LicenseShortSkuEnterprise = "enterprise" +) + +const ( + LicenseUpForRenewalEmailSent = "LicenseUpForRenewalEmailSent" +) + +var ( + trialDuration = 30*(time.Hour*24) + (time.Hour * 8) // 720 hours (30 days) + 8 hours is trial license duration + adminTrialDuration = 30*(time.Hour*24) + (time.Hour * 23) + (time.Minute * 59) + (time.Second * 59) // 720 hours (30 days) + 23 hours, 59 mins and 59 seconds + + // a sanctioned trial's duration is either more than the upper bound, + // or less than the lower bound + sanctionedTrialDurationLowerBound = 31*(time.Hour*24) + (time.Hour * 23) + (time.Minute * 59) + (time.Second * 59) // 744 hours (31 days) + 23 hours, 59 mins and 59 seconds + sanctionedTrialDurationUpperBound = 29*(time.Hour*24) + (time.Hour * 23) + (time.Minute * 59) + (time.Second * 59) // 696 hours (29 days) + 23 hours, 59 mins and 59 seconds ) type LicenseRecord struct { @@ -31,6 +54,8 @@ type License struct { Features *Features `json:"features"` SkuName string `json:"sku_name"` SkuShortName string `json:"sku_short_name"` + IsTrial bool `json:"is_trial"` + IsGovSku bool `json:"is_gov_sku"` } type Customer struct { @@ -51,11 +76,6 @@ type TrialLicenseRequest struct { ReceiveEmailsAccepted bool `json:"receive_emails_accepted"` } -func (tlr *TrialLicenseRequest) ToJson() string { - b, _ := json.Marshal(tlr) - return string(b) -} - type Features struct { Users *int `json:"users"` LDAP *bool `json:"ldap"` @@ -63,6 +83,7 @@ type Features struct { MFA *bool `json:"mfa"` GoogleOAuth *bool `json:"google_oauth"` Office365OAuth *bool `json:"office365_oauth"` + OpenId *bool `json:"openid"` Compliance *bool `json:"compliance"` Cluster *bool `json:"cluster"` Metrics *bool `json:"metrics"` @@ -82,6 +103,9 @@ type Features struct { LockTeammateNameDisplay *bool `json:"lock_teammate_name_display"` EnterprisePlugins *bool `json:"enterprise_plugins"` AdvancedLogging *bool `json:"advanced_logging"` + Cloud *bool `json:"cloud"` + SharedChannels *bool `json:"shared_channels"` + RemoteClusterService *bool `json:"remote_cluster_service"` // after we enabled more features we'll need to 
control them with this FutureFeatures *bool `json:"future_features"` @@ -94,6 +118,7 @@ func (f *Features) ToMap() map[string]interface{} { "mfa": *f.MFA, "google": *f.GoogleOAuth, "office365": *f.Office365OAuth, + "openid": *f.OpenId, "compliance": *f.Compliance, "cluster": *f.Cluster, "metrics": *f.Metrics, @@ -110,6 +135,9 @@ func (f *Features) ToMap() map[string]interface{} { "lock_teammate_name_display": *f.LockTeammateNameDisplay, "enterprise_plugins": *f.EnterprisePlugins, "advanced_logging": *f.AdvancedLogging, + "cloud": *f.Cloud, + "shared_channels": *f.SharedChannels, + "remote_cluster_service": *f.RemoteClusterService, "future": *f.FutureFeatures, } } @@ -143,6 +171,10 @@ func (f *Features) SetDefaults() { f.Office365OAuth = NewBool(*f.FutureFeatures) } + if f.OpenId == nil { + f.OpenId = NewBool(*f.FutureFeatures) + } + if f.Compliance == nil { f.Compliance = NewBool(*f.FutureFeatures) } @@ -218,6 +250,18 @@ func (f *Features) SetDefaults() { if f.AdvancedLogging == nil { f.AdvancedLogging = NewBool(*f.FutureFeatures) } + + if f.Cloud == nil { + f.Cloud = NewBool(false) + } + + if f.SharedChannels == nil { + f.SharedChannels = NewBool(*f.FutureFeatures) + } + + if f.RemoteClusterService == nil { + f.RemoteClusterService = NewBool(*f.FutureFeatures) + } } func (l *License) IsExpired() bool { @@ -226,22 +270,47 @@ func (l *License) IsExpired() bool { func (l *License) IsPastGracePeriod() bool { timeDiff := GetMillis() - l.ExpiresAt - return timeDiff > LICENSE_GRACE_PERIOD + return timeDiff > LicenseGracePeriod +} + +func (l *License) IsWithinExpirationPeriod() bool { + days := l.DaysToExpiration() + return days <= 60 && days >= 58 +} + +func (l *License) DaysToExpiration() int { + dif := l.ExpiresAt - GetMillis() + d, _ := time.ParseDuration(fmt.Sprint(dif) + "ms") + days := d.Hours() / 24 + return int(days) } func (l *License) IsStarted() bool { return l.StartsAt < GetMillis() } -func (l *License) ToJson() string { - b, _ := json.Marshal(l) - return string(b) +func (l *License) IsTrialLicense() bool { + return l.IsTrial || (l.ExpiresAt-l.StartsAt) == trialDuration.Milliseconds() || (l.ExpiresAt-l.StartsAt) == adminTrialDuration.Milliseconds() +} + +func (l *License) IsSanctionedTrial() bool { + duration := l.ExpiresAt - l.StartsAt + + return l.IsTrialLicense() && + (duration >= sanctionedTrialDurationLowerBound.Milliseconds() || duration <= sanctionedTrialDurationUpperBound.Milliseconds()) +} + +func (l *License) HasEnterpriseMarketplacePlugins() bool { + return *l.Features.EnterprisePlugins || + l.SkuShortName == LicenseShortSkuE20 || + l.SkuShortName == LicenseShortSkuProfessional || + l.SkuShortName == LicenseShortSkuEnterprise } // NewTestLicense returns a license that expires in the future and has the given features. func NewTestLicense(features ...string) *License { ret := &License{ - ExpiresAt: GetMillis() + 90*24*60*60*1000, + ExpiresAt: GetMillis() + 90*DayInMilliseconds, Customer: &Customer{}, Features: &Features{}, } @@ -257,10 +326,10 @@ func NewTestLicense(features ...string) *License { return ret } -func LicenseFromJson(data io.Reader) *License { - var o *License - json.NewDecoder(data).Decode(&o) - return o +func NewTestLicenseSKU(skuShortName string, features ...string) *License { + lic := NewTestLicense(features...) 
+ lic.SkuShortName = skuShortName + return lic } func (lr *LicenseRecord) IsValid() *AppError { @@ -272,7 +341,7 @@ func (lr *LicenseRecord) IsValid() *AppError { return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } - if len(lr.Bytes) == 0 || len(lr.Bytes) > 10000 { + if lr.Bytes == "" || len(lr.Bytes) > 10000 { return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go b/vendor/github.com/mattermost/mattermost-server/v6/model/link_metadata.go similarity index 90% rename from vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/link_metadata.go index 6c3e0bd8..66d10739 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/link_metadata.go @@ -16,10 +16,10 @@ import ( ) const ( - LINK_METADATA_TYPE_IMAGE LinkMetadataType = "image" - LINK_METADATA_TYPE_NONE LinkMetadataType = "none" - LINK_METADATA_TYPE_OPENGRAPH LinkMetadataType = "opengraph" - MAX_IMAGES int = 5 + LinkMetadataTypeImage LinkMetadataType = "image" + LinkMetadataTypeNone LinkMetadataType = "none" + LinkMetadataTypeOpengraph LinkMetadataType = "opengraph" + LinkMetadataMaxImages int = 5 ) type LinkMetadataType string @@ -51,8 +51,8 @@ func truncateText(original string) string { } func firstNImages(images []*opengraph.Image, maxImages int) []*opengraph.Image { - if maxImages < 0 { // dont break stuff, if it's weird, go for sane defaults - maxImages = MAX_IMAGES + if maxImages < 0 { // don't break stuff, if it's weird, go for sane defaults + maxImages = LinkMetadataMaxImages } numImages := len(images) if numImages > maxImages { @@ -76,7 +76,7 @@ func TruncateOpenGraph(ogdata *opengraph.OpenGraph) *opengraph.OpenGraph { ogdata.Determiner = empty.Determiner ogdata.Locale = empty.Locale ogdata.LocalesAlternate = empty.LocalesAlternate - ogdata.Images = firstNImages(ogdata.Images, MAX_IMAGES) + ogdata.Images = firstNImages(ogdata.Images, LinkMetadataMaxImages) ogdata.Audios = empty.Audios ogdata.Videos = empty.Videos } @@ -97,7 +97,7 @@ func (o *LinkMetadata) IsValid() *AppError { } switch o.Type { - case LINK_METADATA_TYPE_IMAGE: + case LinkMetadataTypeImage: if o.Data == nil { return NewAppError("LinkMetadata.IsValid", "model.link_metadata.is_valid.data.app_error", nil, "", http.StatusBadRequest) } @@ -105,11 +105,11 @@ func (o *LinkMetadata) IsValid() *AppError { if _, ok := o.Data.(*PostImage); !ok { return NewAppError("LinkMetadata.IsValid", "model.link_metadata.is_valid.data_type.app_error", nil, "", http.StatusBadRequest) } - case LINK_METADATA_TYPE_NONE: + case LinkMetadataTypeNone: if o.Data != nil { return NewAppError("LinkMetadata.IsValid", "model.link_metadata.is_valid.data_type.app_error", nil, "", http.StatusBadRequest) } - case LINK_METADATA_TYPE_OPENGRAPH: + case LinkMetadataTypeOpengraph: if o.Data == nil { return NewAppError("LinkMetadata.IsValid", "model.link_metadata.is_valid.data.app_error", nil, "", http.StatusBadRequest) } @@ -146,13 +146,13 @@ func (o *LinkMetadata) DeserializeDataToConcreteType() error { var err error switch o.Type { - case LINK_METADATA_TYPE_IMAGE: + case LinkMetadataTypeImage: image := &PostImage{} err = json.Unmarshal(b, &image) data = image - case LINK_METADATA_TYPE_OPENGRAPH: + 
case LinkMetadataTypeOpengraph: og := &opengraph.OpenGraph{} json.Unmarshal(b, &og) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go b/vendor/github.com/mattermost/mattermost-server/v6/model/manifest.go similarity index 89% rename from vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/manifest.go index 7c09830a..fda365f2 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/manifest.go @@ -6,7 +6,6 @@ package model import ( "encoding/json" "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -147,7 +146,7 @@ type Manifest struct { Id string `json:"id" yaml:"id"` // The name to be displayed for the plugin. - Name string `json:"name,omitempty" yaml:"name,omitempty"` + Name string `json:"name" yaml:"name"` // A description of what your plugin is and does. Description string `json:"description,omitempty" yaml:"description,omitempty"` @@ -176,9 +175,6 @@ type Manifest struct { // Server defines the server-side portion of your plugin. Server *ManifestServer `json:"server,omitempty" yaml:"server,omitempty"` - // Backend is a deprecated flag for defining the server-side portion of your plugin. Going forward, use Server instead. - Backend *ManifestServer `json:"backend,omitempty" yaml:"backend,omitempty"` - // If your plugin extends the web app, you'll need to define webapp. Webapp *ManifestWebapp `json:"webapp,omitempty" yaml:"webapp,omitempty"` @@ -191,14 +187,14 @@ type Manifest struct { // RequiredConfig defines any required server configuration fields for the plugin to function properly. // - // Use the plugin helpers CheckRequiredServerConfiguration method to enforce this. + // Use the pluginapi.Configuration.CheckRequiredServerConfiguration method to enforce this. RequiredConfig *Config `json:"required_configuration,omitempty" yaml:"required_configuration,omitempty"` } type ManifestServer struct { - // Executables are the paths to your executable binaries, specifying multiple entry points - // for different platforms when bundled together in a single plugin. - Executables *ManifestExecutables `json:"executables,omitempty" yaml:"executables,omitempty"` + // Executables are the paths to your executable binaries, specifying multiple entry + // points for different platforms when bundled together in a single plugin. + Executables map[string]string `json:"executables,omitempty" yaml:"executables,omitempty"` // Executable is the path to your executable binary. This should be relative to the root // of your bundle and the location of the manifest file. @@ -210,6 +206,8 @@ type ManifestServer struct { Executable string `json:"executable" yaml:"executable"` } +// Deprecated: ManifestExecutables is a legacy structure capturing a subset of the known platform executables. 
+// It will be removed in v7.0: https://mattermost.atlassian.net/browse/MM-40531
 type ManifestExecutables struct {
 	// LinuxAmd64 is the path to your executable binary for the corresponding platform
 	LinuxAmd64 string `json:"linux-amd64,omitempty" yaml:"linux-amd64,omitempty"`
@@ -229,28 +227,6 @@ type ManifestWebapp struct {
 	BundleHash []byte `json:"-"`
 }
 
-func (m *Manifest) ToJson() string {
-	b, _ := json.Marshal(m)
-	return string(b)
-}
-
-func ManifestListToJson(m []*Manifest) string {
-	b, _ := json.Marshal(m)
-	return string(b)
-}
-
-func ManifestFromJson(data io.Reader) *Manifest {
-	var m *Manifest
-	json.NewDecoder(data).Decode(&m)
-	return m
-}
-
-func ManifestListFromJson(data io.Reader) []*Manifest {
-	var manifests []*Manifest
-	json.NewDecoder(data).Decode(&manifests)
-	return manifests
-}
-
 func (m *Manifest) HasClient() bool {
 	return m.Webapp != nil
 }
@@ -277,24 +253,14 @@ func (m *Manifest) ClientManifest() *Manifest {
 func (m *Manifest) GetExecutableForRuntime(goOs, goArch string) string {
 	server := m.Server
 
-	// Support the deprecated backend parameter.
-	if server == nil {
-		server = m.Backend
-	}
-
 	if server == nil {
 		return ""
 	}
 
 	var executable string
-	if server.Executables != nil {
-		if goOs == "linux" && goArch == "amd64" {
-			executable = server.Executables.LinuxAmd64
-		} else if goOs == "darwin" && goArch == "amd64" {
-			executable = server.Executables.DarwinAmd64
-		} else if goOs == "windows" && goArch == "amd64" {
-			executable = server.Executables.WindowsAmd64
-		}
+	if len(server.Executables) > 0 {
+		osArch := fmt.Sprintf("%s-%s", goOs, goArch)
+		executable = server.Executables[osArch]
 	}
 
 	if executable == "" {
@@ -305,7 +271,7 @@ func (m *Manifest) GetExecutableForRuntime(goOs, goArch string) string {
 }
 
 func (m *Manifest) HasServer() bool {
-	return m.Server != nil || m.Backend != nil
+	return m.Server != nil
 }
 
 func (m *Manifest) HasWebapp() bool {
@@ -329,15 +295,19 @@ func (m *Manifest) IsValid() error {
 		return errors.New("invalid plugin ID")
 	}
 
-	if m.HomepageURL != "" && !IsValidHttpUrl(m.HomepageURL) {
+	if strings.TrimSpace(m.Name) == "" {
+		return errors.New("a plugin name is needed")
+	}
+
+	if m.HomepageURL != "" && !IsValidHTTPURL(m.HomepageURL) {
 		return errors.New("invalid HomepageURL")
 	}
 
-	if m.SupportURL != "" && !IsValidHttpUrl(m.SupportURL) {
+	if m.SupportURL != "" && !IsValidHTTPURL(m.SupportURL) {
 		return errors.New("invalid SupportURL")
 	}
 
-	if m.ReleaseNotesURL != "" && !IsValidHttpUrl(m.ReleaseNotesURL) {
+	if m.ReleaseNotesURL != "" && !IsValidHTTPURL(m.ReleaseNotesURL) {
 		return errors.New("invalid ReleaseNotesURL")
 	}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go b/vendor/github.com/mattermost/mattermost-server/v6/model/marketplace_plugin.go
similarity index 79%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/marketplace_plugin.go
index 47644513..8f0371b1 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/marketplace_plugin.go
@@ -20,8 +20,12 @@ type BaseMarketplacePlugin struct {
 	IconData        string             `json:"icon_data"`
 	DownloadURL     string             `json:"download_url"`
 	ReleaseNotesURL string             `json:"release_notes_url"`
-	Labels          []MarketplaceLabel `json:"labels"`
-	Signature       string             `json:"signature"` // Signature represents a signature of a plugin saved in base64 encoding.
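A minimal sketch of how a caller adapts to the `Executables` change above, now that the field is an os-arch keyed map and the `ToJson`/`ManifestFromJson` helpers are gone in favor of plain `encoding/json`; it assumes the vendored v6 `model` package is importable, and the plugin ID, name, and binary paths are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"runtime"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	m := &model.Manifest{
		Id:   "com.example.demo", // illustrative plugin ID
		Name: "Demo",             // Name is now required by IsValid in v6
		Server: &model.ManifestServer{
			// Arbitrary os-arch keys are possible now, not just the three
			// fields of the deprecated ManifestExecutables struct.
			Executables: map[string]string{
				"linux-amd64":  "server/dist/plugin-linux-amd64",
				"darwin-arm64": "server/dist/plugin-darwin-arm64",
			},
		},
	}

	// GetExecutableForRuntime builds the "os-arch" key from its arguments.
	fmt.Println(m.GetExecutableForRuntime(runtime.GOOS, runtime.GOARCH))

	// The removed m.ToJson() helper becomes a plain json.Marshal call.
	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```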
+	Labels          []MarketplaceLabel `json:"labels,omitempty"`
+	Hosting         string             `json:"hosting"`       // Indicates whether the plugin is limited to a certain hosting type
+	AuthorType      string             `json:"author_type"`   // The maintainer of the plugin
+	ReleaseStage    string             `json:"release_stage"` // The stage in the software release cycle that the plugin is in
+	Enterprise      bool               `json:"enterprise"`    // Indicates whether the plugin is an enterprise plugin
+	Signature       string             `json:"signature"`     // Signature represents a signature of a plugin saved in base64 encoding.
 	Manifest        *Manifest          `json:"manifest"`
 }
 
@@ -80,7 +84,11 @@ type MarketplacePluginFilter struct {
 	ServerVersion        string
 	BuildEnterpriseReady bool
 	EnterprisePlugins    bool
+	Cloud                bool
 	LocalOnly            bool
+	Platform             string
+	PluginId             string
+	ReturnAllVersions    bool
 }
 
 // ApplyToURL modifies the given url to include query string parameters for the request.
@@ -94,7 +102,11 @@ func (filter *MarketplacePluginFilter) ApplyToURL(u *url.URL) {
 	q.Add("server_version", filter.ServerVersion)
 	q.Add("build_enterprise_ready", strconv.FormatBool(filter.BuildEnterpriseReady))
 	q.Add("enterprise_plugins", strconv.FormatBool(filter.EnterprisePlugins))
+	q.Add("cloud", strconv.FormatBool(filter.Cloud))
 	q.Add("local_only", strconv.FormatBool(filter.LocalOnly))
+	q.Add("platform", filter.Platform)
+	q.Add("plugin_id", filter.PluginId)
+	q.Add("return_all_versions", strconv.FormatBool(filter.ReturnAllVersions))
 	u.RawQuery = q.Encode()
 }
 
@@ -113,12 +125,3 @@ func PluginRequestFromReader(reader io.Reader) (*InstallMarketplacePluginRequest
 	}
 	return r, nil
 }
-
-// ToJson method will return json from plugin request.
-func (r *InstallMarketplacePluginRequest) ToJson() (string, error) {
-	b, err := json.Marshal(r)
-	if err != nil {
-		return "", err
-	}
-	return string(b), nil
-}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go b/vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go
new file mode 100644
index 00000000..94258dbe
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
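The new `MarketplacePluginFilter` fields above flow straight into the query string through `ApplyToURL`. A short sketch under the same assumptions (the marketplace endpoint and plugin ID below are illustrative):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	u, err := url.Parse("https://marketplace.example.com/api/v1/plugins") // illustrative endpoint
	if err != nil {
		panic(err)
	}

	filter := &model.MarketplacePluginFilter{
		ServerVersion:     "6.7.2",
		Platform:          "linux-amd64",
		PluginId:          "com.example.demo", // illustrative
		ReturnAllVersions: true,
	}
	filter.ApplyToURL(u)

	// The query now carries platform, plugin_id, return_all_versions, and
	// cloud alongside the pre-existing parameters.
	fmt.Println(u.String())
}
```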
+ +package model + +import ( + "encoding/json" + "net/http" +) + +type MemberInvite struct { + Emails []string `json:"emails"` + ChannelIds []string `json:"channelIds,omitempty"` + Message string `json:"message"` +} + +// IsValid validates that the invitation info is loaded correctly and with the correct structure +func (i *MemberInvite) IsValid() *AppError { + if len(i.Emails) == 0 { + return NewAppError("MemberInvite.IsValid", "model.member.is_valid.emails.app_error", nil, "", http.StatusBadRequest) + } + + if len(i.ChannelIds) > 0 { + for _, channel := range i.ChannelIds { + if len(channel) != 26 { + return NewAppError("MemberInvite.IsValid", "model.member.is_valid.channel.app_error", nil, "channel="+channel, http.StatusBadRequest) + } + } + } + + return nil +} + +func (i *MemberInvite) UnmarshalJSON(b []byte) error { + var emails []string + if err := json.Unmarshal(b, &emails); err == nil { + *i = MemberInvite{} + i.Emails = emails + return nil + } + + type TempMemberInvite MemberInvite + var o2 TempMemberInvite + if err := json.Unmarshal(b, &o2); err != nil { + return err + } + *i = MemberInvite(o2) + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go b/vendor/github.com/mattermost/mattermost-server/v6/model/mention_map.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/mention_map.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go b/vendor/github.com/mattermost/mattermost-server/v6/model/message_export.go similarity index 54% rename from vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/message_export.go index 88108e2e..cc8f882b 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/message_export.go @@ -3,6 +3,8 @@ package model +import "encoding/json" + type MessageExport struct { TeamId *string TeamName *string @@ -11,7 +13,7 @@ type MessageExport struct { ChannelId *string ChannelName *string ChannelDisplayName *string - ChannelType *string + ChannelType *ChannelType UserId *string UserEmail *string @@ -29,3 +31,20 @@ type MessageExport struct { PostOriginalId *string PostFileIds StringArray } + +type MessageExportCursor struct { + LastPostUpdateAt int64 + LastPostId string +} + +// PreviewID returns the value of the post's previewed_post prop, if present, or an empty string. +func (m *MessageExport) PreviewID() string { + var previewID string + props := map[string]interface{}{} + if m.PostProps != nil && json.Unmarshal([]byte(*m.PostProps), &props) == nil { + if val, ok := props[PostPropsPreviewedPost]; ok { + previewID = val.(string) + } + } + return previewID +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/v6/model/mfa_secret.go new file mode 100644 index 00000000..8cfa675e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/mfa_secret.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
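The custom `MemberInvite.UnmarshalJSON` above keeps backwards compatibility: a legacy bare array of emails and the full object form both decode into the same struct. A quick sketch, assuming the vendored v6 `model` package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	payloads := []string{
		`["alice@example.com","bob@example.com"]`,                                      // legacy bare email array
		`{"emails":["alice@example.com"],"channelIds":["4xp9fdt77pncbef59f4k1qe83o"]}`, // full object (26-char channel ID)
	}

	for _, p := range payloads {
		var invite model.MemberInvite
		if err := json.Unmarshal([]byte(p), &invite); err != nil {
			fmt.Println("decode error:", err)
			continue
		}
		// IsValid enforces non-empty emails and 26-character channel IDs.
		if appErr := invite.IsValid(); appErr != nil {
			fmt.Println("invalid invite:", appErr.Error())
			continue
		}
		fmt.Println(invite.Emails, invite.ChannelIds)
	}
}
```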
+ +package model + +type MfaSecret struct { + Secret string `json:"secret"` + QRCode string `json:"qr_code"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go new file mode 100644 index 00000000..4958f80e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go @@ -0,0 +1,41 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +const ( + AdvancedPermissionsMigrationKey = "AdvancedPermissionsMigrationComplete" + MigrationKeyAdvancedPermissionsPhase2 = "migration_advanced_permissions_phase_2" + + MigrationKeyEmojiPermissionsSplit = "emoji_permissions_split" + MigrationKeyWebhookPermissionsSplit = "webhook_permissions_split" + MigrationKeyListJoinPublicPrivateTeams = "list_join_public_private_teams" + MigrationKeyRemovePermanentDeleteUser = "remove_permanent_delete_user" + MigrationKeyAddBotPermissions = "add_bot_permissions" + MigrationKeyApplyChannelManageDeleteToChannelUser = "apply_channel_manage_delete_to_channel_user" + MigrationKeyRemoveChannelManageDeleteFromTeamUser = "remove_channel_manage_delete_from_team_user" + MigrationKeyViewMembersNewPermission = "view_members_new_permission" + MigrationKeyAddManageGuestsPermissions = "add_manage_guests_permissions" + MigrationKeyChannelModerationsPermissions = "channel_moderations_permissions" + MigrationKeyAddUseGroupMentionsPermission = "add_use_group_mentions_permission" + MigrationKeyAddSystemConsolePermissions = "add_system_console_permissions" + MigrationKeySidebarCategoriesPhase2 = "migration_sidebar_categories_phase_2" + MigrationKeyAddConvertChannelPermissions = "add_convert_channel_permissions" + MigrationKeyAddSystemRolesPermissions = "add_system_roles_permissions" + MigrationKeyAddBillingPermissions = "add_billing_permissions" + MigrationKeyAddManageSharedChannelPermissions = "manage_shared_channel_permissions" + MigrationKeyAddManageSecureConnectionsPermissions = "manage_secure_connections_permissions" + MigrationKeyAddDownloadComplianceExportResults = "download_compliance_export_results" + MigrationKeyAddComplianceSubsectionPermissions = "compliance_subsection_permissions" + MigrationKeyAddExperimentalSubsectionPermissions = "experimental_subsection_permissions" + MigrationKeyAddAuthenticationSubsectionPermissions = "authentication_subsection_permissions" + MigrationKeyAddSiteSubsectionPermissions = "site_subsection_permissions" + MigrationKeyAddEnvironmentSubsectionPermissions = "environment_subsection_permissions" + MigrationKeyAddReportingSubsectionPermissions = "reporting_subsection_permissions" + MigrationKeyAddTestEmailAncillaryPermission = "test_email_ancillary_permission" + MigrationKeyAddAboutSubsectionPermissions = "about_subsection_permissions" + MigrationKeyAddIntegrationsSubsectionPermissions = "integrations_subsection_permissions" + MigrationKeyAddPlaybooksPermissions = "playbooks_permissions" + MigrationKeyAddCustomUserGroupsPermissions = "custom_groups_permissions" + MigrationKeyAddPlayboosksManageRolesPermissions = "playbooks_manage_roles" +) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go b/vendor/github.com/mattermost/mattermost-server/v6/model/oauth.go similarity index 67% rename from vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/oauth.go index 4a345a6e..82dd8fb2 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/oauth.go @@ -4,33 +4,32 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" "unicode/utf8" ) const ( - OAUTH_ACTION_SIGNUP = "signup" - OAUTH_ACTION_LOGIN = "login" - OAUTH_ACTION_EMAIL_TO_SSO = "email_to_sso" - OAUTH_ACTION_SSO_TO_EMAIL = "sso_to_email" - OAUTH_ACTION_MOBILE = "mobile" + OAuthActionSignup = "signup" + OAuthActionLogin = "login" + OAuthActionEmailToSSO = "email_to_sso" + OAuthActionSSOToEmail = "sso_to_email" + OAuthActionMobile = "mobile" ) type OAuthApp struct { - Id string `json:"id"` - CreatorId string `json:"creator_id"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - ClientSecret string `json:"client_secret"` - Name string `json:"name"` - Description string `json:"description"` - IconURL string `json:"icon_url"` - CallbackUrls StringArray `json:"callback_urls"` - Homepage string `json:"homepage"` - IsTrusted bool `json:"is_trusted"` + Id string `json:"id"` + CreatorId string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + ClientSecret string `json:"client_secret"` + Name string `json:"name"` + Description string `json:"description"` + IconURL string `json:"icon_url"` + CallbackUrls StringArray `json:"callback_urls"` + Homepage string `json:"homepage"` + IsTrusted bool `json:"is_trusted"` + MattermostAppID string `json:"mattermost_app_id"` } // IsValid validates the app and returns an error if it isn't configured @@ -53,11 +52,11 @@ func (a *OAuthApp) IsValid() *AppError { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.creator_id.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } - if len(a.ClientSecret) == 0 || len(a.ClientSecret) > 128 { + if a.ClientSecret == "" || len(a.ClientSecret) > 128 { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.client_secret.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } - if len(a.Name) == 0 || len(a.Name) > 64 { + if a.Name == "" || len(a.Name) > 64 { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.name.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } @@ -66,12 +65,12 @@ func (a *OAuthApp) IsValid() *AppError { } for _, callback := range a.CallbackUrls { - if !IsValidHttpUrl(callback) { + if !IsValidHTTPURL(callback) { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "", http.StatusBadRequest) } } - if len(a.Homepage) == 0 || len(a.Homepage) > 256 || !IsValidHttpUrl(a.Homepage) { + if a.Homepage == "" || len(a.Homepage) > 256 || !IsValidHTTPURL(a.Homepage) { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.homepage.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } @@ -79,12 +78,16 @@ func (a *OAuthApp) IsValid() *AppError { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.description.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } - if len(a.IconURL) > 0 { - if len(a.IconURL) > 512 || !IsValidHttpUrl(a.IconURL) { + if a.IconURL != "" { + if len(a.IconURL) > 512 || !IsValidHTTPURL(a.IconURL) { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.icon_url.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } } + if len(a.MattermostAppID) > 32 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.mattermost_app_id.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + return nil } @@ -108,11 +111,6 @@ func (a *OAuthApp) PreUpdate() { a.UpdateAt = 
GetMillis()
 }
 
-func (a *OAuthApp) ToJson() string {
-	b, _ := json.Marshal(a)
-	return string(b)
-}
-
 // Generate a valid strong etag so the browser can cache the results
 func (a *OAuthApp) Etag() string {
 	return Etag(a.Id, a.UpdateAt)
@@ -132,20 +130,3 @@ func (a *OAuthApp) IsValidRedirectURL(url string) bool {
 
 	return false
 }
-
-func OAuthAppFromJson(data io.Reader) *OAuthApp {
-	var app *OAuthApp
-	json.NewDecoder(data).Decode(&app)
-	return app
-}
-
-func OAuthAppListToJson(l []*OAuthApp) string {
-	b, _ := json.Marshal(l)
-	return string(b)
-}
-
-func OAuthAppListFromJson(data io.Reader) []*OAuthApp {
-	var o []*OAuthApp
-	json.NewDecoder(data).Decode(&o)
-	return o
-}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go b/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go
new file mode 100644
index 00000000..9c83e376
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// CompleteOnboardingRequest describes parameters of the requested plugin.
+type CompleteOnboardingRequest struct {
+	InstallPlugins []string `json:"install_plugins"` // InstallPlugins is a list of plugins to be installed
+}
+
+// CompleteOnboardingRequestFromReader decodes a json-encoded request from the given io.Reader.
+func CompleteOnboardingRequestFromReader(reader io.Reader) (*CompleteOnboardingRequest, error) {
+	var r *CompleteOnboardingRequest
+	err := json.NewDecoder(reader).Decode(&r)
+	if err != nil {
+		return nil, err
+	}
+
+	return r, nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/v6/model/outgoing_webhook.go
similarity index 84%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/outgoing_webhook.go
index d6cb2138..b24aed67 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/outgoing_webhook.go
@@ -4,9 +4,7 @@ package model
 
 import (
-	"encoding/json"
 	"fmt"
-	"io"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -57,12 +55,7 @@ type OutgoingWebhookResponse struct {
 	ResponseType string `json:"response_type"`
 }
 
-const OUTGOING_HOOK_RESPONSE_TYPE_COMMENT = "comment"
-
-func (o *OutgoingWebhookPayload) ToJSON() string {
-	b, _ := json.Marshal(o)
-	return string(b)
-}
+const OutgoingHookResponseTypeComment = "comment"
 
 func (o *OutgoingWebhookPayload) ToFormValues() string {
 	v := url.Values{}
@@ -82,42 +75,6 @@ func (o *OutgoingWebhookPayload) ToFormValues() string {
 	return v.Encode()
 }
 
-func (o *OutgoingWebhook) ToJson() string {
-	b, _ := json.Marshal(o)
-	return string(b)
-}
-
-func OutgoingWebhookFromJson(data io.Reader) *OutgoingWebhook {
-	var o *OutgoingWebhook
-	json.NewDecoder(data).Decode(&o)
-	return o
-}
-
-func OutgoingWebhookListToJson(l []*OutgoingWebhook) string {
-	b, _ := json.Marshal(l)
-	return string(b)
-}
-
-func OutgoingWebhookListFromJson(data io.Reader) []*OutgoingWebhook {
-	var o []*OutgoingWebhook
-	json.NewDecoder(data).Decode(&o)
-	return o
-}
-
-func (o *OutgoingWebhookResponse) ToJson() string {
-	b, _ := json.Marshal(o)
-	return string(b)
-}
-
-func OutgoingWebhookResponseFromJson(data io.Reader) (*OutgoingWebhookResponse, error) {
-	var o
*OutgoingWebhookResponse - err := json.NewDecoder(data).Decode(&o) - if err == io.EOF { - return nil, nil - } - return o, err -} - func (o *OutgoingWebhook) IsValid() *AppError { if !IsValidId(o.Id) { @@ -140,7 +97,7 @@ func (o *OutgoingWebhook) IsValid() *AppError { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ChannelId) != 0 && !IsValidId(o.ChannelId) { + if o.ChannelId != "" && !IsValidId(o.ChannelId) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } @@ -154,7 +111,7 @@ func (o *OutgoingWebhook) IsValid() *AppError { if len(o.TriggerWords) != 0 { for _, triggerWord := range o.TriggerWords { - if len(triggerWord) == 0 { + if triggerWord == "" { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.trigger_words.app_error", nil, "", http.StatusBadRequest) } } @@ -165,7 +122,7 @@ func (o *OutgoingWebhook) IsValid() *AppError { } for _, callback := range o.CallbackURLs { - if !IsValidHttpUrl(callback) { + if !IsValidHTTPURL(callback) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.url.app_error", nil, "", http.StatusBadRequest) } } @@ -215,7 +172,7 @@ func (o *OutgoingWebhook) PreUpdate() { } func (o *OutgoingWebhook) TriggerWordExactMatch(word string) bool { - if len(word) == 0 { + if word == "" { return false } @@ -229,7 +186,7 @@ func (o *OutgoingWebhook) TriggerWordExactMatch(word string) bool { } func (o *OutgoingWebhook) TriggerWordStartsWith(word string) bool { - if len(word) == 0 { + if word == "" { return false } @@ -243,7 +200,7 @@ func (o *OutgoingWebhook) TriggerWordStartsWith(word string) bool { } func (o *OutgoingWebhook) GetTriggerWord(word string, isExactMatch bool) (triggerWord string) { - if len(word) == 0 { + if word == "" { return } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/permalink.go b/vendor/github.com/mattermost/mattermost-server/v6/model/permalink.go new file mode 100644 index 00000000..12645646 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/permalink.go @@ -0,0 +1,31 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type Permalink struct { + PreviewPost *PreviewPost `json:"preview_post"` +} + +type PreviewPost struct { + PostID string `json:"post_id"` + Post *Post `json:"post"` + TeamName string `json:"team_name"` + ChannelDisplayName string `json:"channel_display_name"` + ChannelType ChannelType `json:"channel_type"` + ChannelID string `json:"channel_id"` +} + +func NewPreviewPost(post *Post, team *Team, channel *Channel) *PreviewPost { + if post == nil { + return nil + } + return &PreviewPost{ + PostID: post.Id, + Post: post, + TeamName: team.Name, + ChannelDisplayName: channel.DisplayName, + ChannelType: channel.Type, + ChannelID: channel.Id, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go new file mode 100644 index 00000000..bf1f4023 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go @@ -0,0 +1,2392 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
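`NewPreviewPost` above only nil-guards the post, so callers are expected to pass non-nil team and channel values. A minimal usage sketch, assuming the vendored v6 `model` package (IDs and names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	post := &model.Post{Id: "o4w39mc1ffgy7e6jiz4dal96wa"} // illustrative 26-char ID
	team := &model.Team{Name: "engineering"}
	channel := &model.Channel{
		Id:          "4xp9fdt77pncbef59f4k1qe83o",
		DisplayName: "Town Square",
		Type:        model.ChannelTypeOpen, // ChannelType is a typed string in v6
	}

	preview := model.NewPreviewPost(post, team, channel)
	fmt.Println(preview.TeamName, preview.ChannelDisplayName, preview.ChannelType)

	// A nil post short-circuits to a nil preview.
	fmt.Println(model.NewPreviewPost(nil, team, channel) == nil) // true
}
```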
+ +package model + +const ( + PermissionScopeSystem = "system_scope" + PermissionScopeTeam = "team_scope" + PermissionScopeChannel = "channel_scope" + PermissionScopeGroup = "group_scope" + PermissionScopePlaybook = "playbook_scope" + PermissionScopeRun = "run_scope" +) + +type Permission struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Scope string `json:"scope"` +} + +var PermissionInviteUser *Permission +var PermissionAddUserToTeam *Permission +var PermissionUseSlashCommands *Permission +var PermissionManageSlashCommands *Permission +var PermissionManageOthersSlashCommands *Permission +var PermissionCreatePublicChannel *Permission +var PermissionCreatePrivateChannel *Permission +var PermissionManagePublicChannelMembers *Permission +var PermissionManagePrivateChannelMembers *Permission +var PermissionConvertPublicChannelToPrivate *Permission +var PermissionConvertPrivateChannelToPublic *Permission +var PermissionAssignSystemAdminRole *Permission +var PermissionManageRoles *Permission +var PermissionManageTeamRoles *Permission +var PermissionManageChannelRoles *Permission +var PermissionCreateDirectChannel *Permission +var PermissionCreateGroupChannel *Permission +var PermissionManagePublicChannelProperties *Permission +var PermissionManagePrivateChannelProperties *Permission +var PermissionListPublicTeams *Permission +var PermissionJoinPublicTeams *Permission +var PermissionListPrivateTeams *Permission +var PermissionJoinPrivateTeams *Permission +var PermissionListTeamChannels *Permission +var PermissionJoinPublicChannels *Permission +var PermissionDeletePublicChannel *Permission +var PermissionDeletePrivateChannel *Permission +var PermissionEditOtherUsers *Permission +var PermissionReadChannel *Permission +var PermissionReadPublicChannelGroups *Permission +var PermissionReadPrivateChannelGroups *Permission +var PermissionReadPublicChannel *Permission +var PermissionAddReaction *Permission +var PermissionRemoveReaction *Permission +var PermissionRemoveOthersReactions *Permission +var PermissionPermanentDeleteUser *Permission +var PermissionUploadFile *Permission +var PermissionGetPublicLink *Permission +var PermissionManageWebhooks *Permission +var PermissionManageOthersWebhooks *Permission +var PermissionManageIncomingWebhooks *Permission +var PermissionManageOutgoingWebhooks *Permission +var PermissionManageOthersIncomingWebhooks *Permission +var PermissionManageOthersOutgoingWebhooks *Permission +var PermissionManageOAuth *Permission +var PermissionManageSystemWideOAuth *Permission +var PermissionManageEmojis *Permission +var PermissionManageOthersEmojis *Permission +var PermissionCreateEmojis *Permission +var PermissionDeleteEmojis *Permission +var PermissionDeleteOthersEmojis *Permission +var PermissionCreatePost *Permission +var PermissionCreatePostPublic *Permission +var PermissionCreatePostEphemeral *Permission +var PermissionEditPost *Permission +var PermissionEditOthersPosts *Permission +var PermissionDeletePost *Permission +var PermissionDeleteOthersPosts *Permission +var PermissionRemoveUserFromTeam *Permission +var PermissionCreateTeam *Permission +var PermissionManageTeam *Permission +var PermissionImportTeam *Permission +var PermissionViewTeam *Permission +var PermissionListUsersWithoutTeam *Permission +var PermissionReadJobs *Permission +var PermissionManageJobs *Permission +var PermissionCreateUserAccessToken *Permission +var PermissionReadUserAccessToken *Permission +var PermissionRevokeUserAccessToken *Permission 
+var PermissionCreateBot *Permission +var PermissionAssignBot *Permission +var PermissionReadBots *Permission +var PermissionReadOthersBots *Permission +var PermissionManageBots *Permission +var PermissionManageOthersBots *Permission +var PermissionViewMembers *Permission +var PermissionInviteGuest *Permission +var PermissionPromoteGuest *Permission +var PermissionDemoteToGuest *Permission +var PermissionUseChannelMentions *Permission +var PermissionUseGroupMentions *Permission +var PermissionReadOtherUsersTeams *Permission +var PermissionEditBrand *Permission +var PermissionManageSharedChannels *Permission +var PermissionManageSecureConnections *Permission +var PermissionDownloadComplianceExportResult *Permission +var PermissionCreateDataRetentionJob *Permission +var PermissionReadDataRetentionJob *Permission +var PermissionCreateComplianceExportJob *Permission +var PermissionReadComplianceExportJob *Permission +var PermissionReadAudits *Permission +var PermissionTestElasticsearch *Permission +var PermissionTestSiteURL *Permission +var PermissionTestS3 *Permission +var PermissionReloadConfig *Permission +var PermissionInvalidateCaches *Permission +var PermissionRecycleDatabaseConnections *Permission +var PermissionPurgeElasticsearchIndexes *Permission +var PermissionTestEmail *Permission +var PermissionCreateElasticsearchPostIndexingJob *Permission +var PermissionCreateElasticsearchPostAggregationJob *Permission +var PermissionReadElasticsearchPostIndexingJob *Permission +var PermissionReadElasticsearchPostAggregationJob *Permission +var PermissionPurgeBleveIndexes *Permission +var PermissionCreatePostBleveIndexesJob *Permission +var PermissionCreateLdapSyncJob *Permission +var PermissionReadLdapSyncJob *Permission +var PermissionTestLdap *Permission +var PermissionInvalidateEmailInvite *Permission +var PermissionGetSamlMetadataFromIdp *Permission +var PermissionAddSamlPublicCert *Permission +var PermissionAddSamlPrivateCert *Permission +var PermissionAddSamlIdpCert *Permission +var PermissionRemoveSamlPublicCert *Permission +var PermissionRemoveSamlPrivateCert *Permission +var PermissionRemoveSamlIdpCert *Permission +var PermissionGetSamlCertStatus *Permission +var PermissionAddLdapPublicCert *Permission +var PermissionAddLdapPrivateCert *Permission +var PermissionRemoveLdapPublicCert *Permission +var PermissionRemoveLdapPrivateCert *Permission +var PermissionGetLogs *Permission +var PermissionGetAnalytics *Permission +var PermissionReadLicenseInformation *Permission +var PermissionManageLicenseInformation *Permission + +var PermissionSysconsoleReadAbout *Permission +var PermissionSysconsoleWriteAbout *Permission + +var PermissionSysconsoleReadAboutEditionAndLicense *Permission +var PermissionSysconsoleWriteAboutEditionAndLicense *Permission + +var PermissionSysconsoleReadBilling *Permission +var PermissionSysconsoleWriteBilling *Permission + +var PermissionSysconsoleReadReporting *Permission +var PermissionSysconsoleWriteReporting *Permission + +var PermissionSysconsoleReadReportingSiteStatistics *Permission +var PermissionSysconsoleWriteReportingSiteStatistics *Permission + +var PermissionSysconsoleReadReportingTeamStatistics *Permission +var PermissionSysconsoleWriteReportingTeamStatistics *Permission + +var PermissionSysconsoleReadReportingServerLogs *Permission +var PermissionSysconsoleWriteReportingServerLogs *Permission + +var PermissionSysconsoleReadUserManagementUsers *Permission +var PermissionSysconsoleWriteUserManagementUsers *Permission + +var 
PermissionSysconsoleReadUserManagementGroups *Permission +var PermissionSysconsoleWriteUserManagementGroups *Permission + +var PermissionSysconsoleReadUserManagementTeams *Permission +var PermissionSysconsoleWriteUserManagementTeams *Permission + +var PermissionSysconsoleReadUserManagementChannels *Permission +var PermissionSysconsoleWriteUserManagementChannels *Permission + +var PermissionSysconsoleReadUserManagementPermissions *Permission +var PermissionSysconsoleWriteUserManagementPermissions *Permission + +var PermissionSysconsoleReadUserManagementSystemRoles *Permission +var PermissionSysconsoleWriteUserManagementSystemRoles *Permission + +// DEPRECATED +var PermissionSysconsoleReadEnvironment *Permission + +// DEPRECATED +var PermissionSysconsoleWriteEnvironment *Permission + +var PermissionSysconsoleReadEnvironmentWebServer *Permission +var PermissionSysconsoleWriteEnvironmentWebServer *Permission + +var PermissionSysconsoleReadEnvironmentDatabase *Permission +var PermissionSysconsoleWriteEnvironmentDatabase *Permission + +var PermissionSysconsoleReadEnvironmentElasticsearch *Permission +var PermissionSysconsoleWriteEnvironmentElasticsearch *Permission + +var PermissionSysconsoleReadEnvironmentFileStorage *Permission +var PermissionSysconsoleWriteEnvironmentFileStorage *Permission + +var PermissionSysconsoleReadEnvironmentImageProxy *Permission +var PermissionSysconsoleWriteEnvironmentImageProxy *Permission + +var PermissionSysconsoleReadEnvironmentSMTP *Permission +var PermissionSysconsoleWriteEnvironmentSMTP *Permission + +var PermissionSysconsoleReadEnvironmentPushNotificationServer *Permission +var PermissionSysconsoleWriteEnvironmentPushNotificationServer *Permission + +var PermissionSysconsoleReadEnvironmentHighAvailability *Permission +var PermissionSysconsoleWriteEnvironmentHighAvailability *Permission + +var PermissionSysconsoleReadEnvironmentRateLimiting *Permission +var PermissionSysconsoleWriteEnvironmentRateLimiting *Permission + +var PermissionSysconsoleReadEnvironmentLogging *Permission +var PermissionSysconsoleWriteEnvironmentLogging *Permission + +var PermissionSysconsoleReadEnvironmentSessionLengths *Permission +var PermissionSysconsoleWriteEnvironmentSessionLengths *Permission + +var PermissionSysconsoleReadEnvironmentPerformanceMonitoring *Permission +var PermissionSysconsoleWriteEnvironmentPerformanceMonitoring *Permission + +var PermissionSysconsoleReadEnvironmentDeveloper *Permission +var PermissionSysconsoleWriteEnvironmentDeveloper *Permission + +var PermissionSysconsoleReadSite *Permission +var PermissionSysconsoleWriteSite *Permission + +var PermissionSysconsoleReadSiteCustomization *Permission +var PermissionSysconsoleWriteSiteCustomization *Permission + +var PermissionSysconsoleReadSiteLocalization *Permission +var PermissionSysconsoleWriteSiteLocalization *Permission + +var PermissionSysconsoleReadSiteUsersAndTeams *Permission +var PermissionSysconsoleWriteSiteUsersAndTeams *Permission + +var PermissionSysconsoleReadSiteNotifications *Permission +var PermissionSysconsoleWriteSiteNotifications *Permission + +var PermissionSysconsoleReadSiteAnnouncementBanner *Permission +var PermissionSysconsoleWriteSiteAnnouncementBanner *Permission + +var PermissionSysconsoleReadSiteEmoji *Permission +var PermissionSysconsoleWriteSiteEmoji *Permission + +var PermissionSysconsoleReadSitePosts *Permission +var PermissionSysconsoleWriteSitePosts *Permission + +var PermissionSysconsoleReadSiteFileSharingAndDownloads *Permission +var 
PermissionSysconsoleWriteSiteFileSharingAndDownloads *Permission + +var PermissionSysconsoleReadSitePublicLinks *Permission +var PermissionSysconsoleWriteSitePublicLinks *Permission + +var PermissionSysconsoleReadSiteNotices *Permission +var PermissionSysconsoleWriteSiteNotices *Permission + +var PermissionSysconsoleReadAuthentication *Permission +var PermissionSysconsoleWriteAuthentication *Permission + +var PermissionSysconsoleReadAuthenticationSignup *Permission +var PermissionSysconsoleWriteAuthenticationSignup *Permission + +var PermissionSysconsoleReadAuthenticationEmail *Permission +var PermissionSysconsoleWriteAuthenticationEmail *Permission + +var PermissionSysconsoleReadAuthenticationPassword *Permission +var PermissionSysconsoleWriteAuthenticationPassword *Permission + +var PermissionSysconsoleReadAuthenticationMfa *Permission +var PermissionSysconsoleWriteAuthenticationMfa *Permission + +var PermissionSysconsoleReadAuthenticationLdap *Permission +var PermissionSysconsoleWriteAuthenticationLdap *Permission + +var PermissionSysconsoleReadAuthenticationSaml *Permission +var PermissionSysconsoleWriteAuthenticationSaml *Permission + +var PermissionSysconsoleReadAuthenticationOpenid *Permission +var PermissionSysconsoleWriteAuthenticationOpenid *Permission + +var PermissionSysconsoleReadAuthenticationGuestAccess *Permission +var PermissionSysconsoleWriteAuthenticationGuestAccess *Permission + +var PermissionSysconsoleReadPlugins *Permission +var PermissionSysconsoleWritePlugins *Permission + +var PermissionSysconsoleReadIntegrations *Permission +var PermissionSysconsoleWriteIntegrations *Permission + +var PermissionSysconsoleReadIntegrationsIntegrationManagement *Permission +var PermissionSysconsoleWriteIntegrationsIntegrationManagement *Permission + +var PermissionSysconsoleReadIntegrationsBotAccounts *Permission +var PermissionSysconsoleWriteIntegrationsBotAccounts *Permission + +var PermissionSysconsoleReadIntegrationsGif *Permission +var PermissionSysconsoleWriteIntegrationsGif *Permission + +var PermissionSysconsoleReadIntegrationsCors *Permission +var PermissionSysconsoleWriteIntegrationsCors *Permission + +var PermissionSysconsoleReadCompliance *Permission +var PermissionSysconsoleWriteCompliance *Permission + +var PermissionSysconsoleReadComplianceDataRetentionPolicy *Permission +var PermissionSysconsoleWriteComplianceDataRetentionPolicy *Permission + +var PermissionSysconsoleReadComplianceComplianceExport *Permission +var PermissionSysconsoleWriteComplianceComplianceExport *Permission + +var PermissionSysconsoleReadComplianceComplianceMonitoring *Permission +var PermissionSysconsoleWriteComplianceComplianceMonitoring *Permission + +var PermissionSysconsoleReadComplianceCustomTermsOfService *Permission +var PermissionSysconsoleWriteComplianceCustomTermsOfService *Permission + +var PermissionSysconsoleReadExperimental *Permission +var PermissionSysconsoleWriteExperimental *Permission + +var PermissionSysconsoleReadExperimentalFeatures *Permission +var PermissionSysconsoleWriteExperimentalFeatures *Permission + +var PermissionSysconsoleReadExperimentalFeatureFlags *Permission +var PermissionSysconsoleWriteExperimentalFeatureFlags *Permission + +var PermissionSysconsoleReadExperimentalBleve *Permission +var PermissionSysconsoleWriteExperimentalBleve *Permission + +var PermissionPublicPlaybookCreate *Permission +var PermissionPublicPlaybookManageProperties *Permission +var PermissionPublicPlaybookManageMembers *Permission +var PermissionPublicPlaybookManageRoles *Permission +var 
PermissionPublicPlaybookView *Permission +var PermissionPublicPlaybookMakePrivate *Permission + +var PermissionPrivatePlaybookCreate *Permission +var PermissionPrivatePlaybookManageProperties *Permission +var PermissionPrivatePlaybookManageMembers *Permission +var PermissionPrivatePlaybookManageRoles *Permission +var PermissionPrivatePlaybookView *Permission +var PermissionPrivatePlaybookMakePublic *Permission + +var PermissionRunCreate *Permission +var PermissionRunManageProperties *Permission +var PermissionRunManageMembers *Permission +var PermissionRunView *Permission + +// General permission that encompasses all system admin functions +// in the future this could be broken up to allow access to some +// admin functions but not others +var PermissionManageSystem *Permission + +var PermissionCreateCustomGroup *Permission +var PermissionManageCustomGroupMembers *Permission +var PermissionEditCustomGroup *Permission +var PermissionDeleteCustomGroup *Permission + +var AllPermissions []*Permission +var DeprecatedPermissions []*Permission + +var ChannelModeratedPermissions []string +var ChannelModeratedPermissionsMap map[string]string + +var SysconsoleReadPermissions []*Permission +var SysconsoleWritePermissions []*Permission + +func initializePermissions() { + PermissionInviteUser = &Permission{ + "invite_user", + "authentication.permissions.team_invite_user.name", + "authentication.permissions.team_invite_user.description", + PermissionScopeTeam, + } + PermissionAddUserToTeam = &Permission{ + "add_user_to_team", + "authentication.permissions.add_user_to_team.name", + "authentication.permissions.add_user_to_team.description", + PermissionScopeTeam, + } + PermissionUseSlashCommands = &Permission{ + "use_slash_commands", + "authentication.permissions.team_use_slash_commands.name", + "authentication.permissions.team_use_slash_commands.description", + PermissionScopeChannel, + } + PermissionManageSlashCommands = &Permission{ + "manage_slash_commands", + "authentication.permissions.manage_slash_commands.name", + "authentication.permissions.manage_slash_commands.description", + PermissionScopeTeam, + } + PermissionManageOthersSlashCommands = &Permission{ + "manage_others_slash_commands", + "authentication.permissions.manage_others_slash_commands.name", + "authentication.permissions.manage_others_slash_commands.description", + PermissionScopeTeam, + } + PermissionCreatePublicChannel = &Permission{ + "create_public_channel", + "authentication.permissions.create_public_channel.name", + "authentication.permissions.create_public_channel.description", + PermissionScopeTeam, + } + PermissionCreatePrivateChannel = &Permission{ + "create_private_channel", + "authentication.permissions.create_private_channel.name", + "authentication.permissions.create_private_channel.description", + PermissionScopeTeam, + } + PermissionManagePublicChannelMembers = &Permission{ + "manage_public_channel_members", + "authentication.permissions.manage_public_channel_members.name", + "authentication.permissions.manage_public_channel_members.description", + PermissionScopeChannel, + } + PermissionManagePrivateChannelMembers = &Permission{ + "manage_private_channel_members", + "authentication.permissions.manage_private_channel_members.name", + "authentication.permissions.manage_private_channel_members.description", + PermissionScopeChannel, + } + PermissionConvertPublicChannelToPrivate = &Permission{ + "convert_public_channel_to_private", + "authentication.permissions.convert_public_channel_to_private.name", + 
"authentication.permissions.convert_public_channel_to_private.description", + PermissionScopeChannel, + } + PermissionConvertPrivateChannelToPublic = &Permission{ + "convert_private_channel_to_public", + "authentication.permissions.convert_private_channel_to_public.name", + "authentication.permissions.convert_private_channel_to_public.description", + PermissionScopeChannel, + } + PermissionAssignSystemAdminRole = &Permission{ + "assign_system_admin_role", + "authentication.permissions.assign_system_admin_role.name", + "authentication.permissions.assign_system_admin_role.description", + PermissionScopeSystem, + } + PermissionManageRoles = &Permission{ + "manage_roles", + "authentication.permissions.manage_roles.name", + "authentication.permissions.manage_roles.description", + PermissionScopeSystem, + } + PermissionManageTeamRoles = &Permission{ + "manage_team_roles", + "authentication.permissions.manage_team_roles.name", + "authentication.permissions.manage_team_roles.description", + PermissionScopeTeam, + } + PermissionManageChannelRoles = &Permission{ + "manage_channel_roles", + "authentication.permissions.manage_channel_roles.name", + "authentication.permissions.manage_channel_roles.description", + PermissionScopeChannel, + } + PermissionManageSystem = &Permission{ + "manage_system", + "authentication.permissions.manage_system.name", + "authentication.permissions.manage_system.description", + PermissionScopeSystem, + } + PermissionCreateDirectChannel = &Permission{ + "create_direct_channel", + "authentication.permissions.create_direct_channel.name", + "authentication.permissions.create_direct_channel.description", + PermissionScopeSystem, + } + PermissionCreateGroupChannel = &Permission{ + "create_group_channel", + "authentication.permissions.create_group_channel.name", + "authentication.permissions.create_group_channel.description", + PermissionScopeSystem, + } + PermissionManagePublicChannelProperties = &Permission{ + "manage_public_channel_properties", + "authentication.permissions.manage_public_channel_properties.name", + "authentication.permissions.manage_public_channel_properties.description", + PermissionScopeChannel, + } + PermissionManagePrivateChannelProperties = &Permission{ + "manage_private_channel_properties", + "authentication.permissions.manage_private_channel_properties.name", + "authentication.permissions.manage_private_channel_properties.description", + PermissionScopeChannel, + } + PermissionListPublicTeams = &Permission{ + "list_public_teams", + "authentication.permissions.list_public_teams.name", + "authentication.permissions.list_public_teams.description", + PermissionScopeSystem, + } + PermissionJoinPublicTeams = &Permission{ + "join_public_teams", + "authentication.permissions.join_public_teams.name", + "authentication.permissions.join_public_teams.description", + PermissionScopeSystem, + } + PermissionListPrivateTeams = &Permission{ + "list_private_teams", + "authentication.permissions.list_private_teams.name", + "authentication.permissions.list_private_teams.description", + PermissionScopeSystem, + } + PermissionJoinPrivateTeams = &Permission{ + "join_private_teams", + "authentication.permissions.join_private_teams.name", + "authentication.permissions.join_private_teams.description", + PermissionScopeSystem, + } + PermissionListTeamChannels = &Permission{ + "list_team_channels", + "authentication.permissions.list_team_channels.name", + "authentication.permissions.list_team_channels.description", + PermissionScopeTeam, + } + PermissionJoinPublicChannels = 
&Permission{ + "join_public_channels", + "authentication.permissions.join_public_channels.name", + "authentication.permissions.join_public_channels.description", + PermissionScopeTeam, + } + PermissionDeletePublicChannel = &Permission{ + "delete_public_channel", + "authentication.permissions.delete_public_channel.name", + "authentication.permissions.delete_public_channel.description", + PermissionScopeChannel, + } + PermissionDeletePrivateChannel = &Permission{ + "delete_private_channel", + "authentication.permissions.delete_private_channel.name", + "authentication.permissions.delete_private_channel.description", + PermissionScopeChannel, + } + PermissionEditOtherUsers = &Permission{ + "edit_other_users", + "authentication.permissions.edit_other_users.name", + "authentication.permissions.edit_other_users.description", + PermissionScopeSystem, + } + PermissionReadChannel = &Permission{ + "read_channel", + "authentication.permissions.read_channel.name", + "authentication.permissions.read_channel.description", + PermissionScopeChannel, + } + PermissionReadPublicChannelGroups = &Permission{ + "read_public_channel_groups", + "authentication.permissions.read_public_channel_groups.name", + "authentication.permissions.read_public_channel_groups.description", + PermissionScopeChannel, + } + PermissionReadPrivateChannelGroups = &Permission{ + "read_private_channel_groups", + "authentication.permissions.read_private_channel_groups.name", + "authentication.permissions.read_private_channel_groups.description", + PermissionScopeChannel, + } + PermissionReadPublicChannel = &Permission{ + "read_public_channel", + "authentication.permissions.read_public_channel.name", + "authentication.permissions.read_public_channel.description", + PermissionScopeTeam, + } + PermissionAddReaction = &Permission{ + "add_reaction", + "authentication.permissions.add_reaction.name", + "authentication.permissions.add_reaction.description", + PermissionScopeChannel, + } + PermissionRemoveReaction = &Permission{ + "remove_reaction", + "authentication.permissions.remove_reaction.name", + "authentication.permissions.remove_reaction.description", + PermissionScopeChannel, + } + PermissionRemoveOthersReactions = &Permission{ + "remove_others_reactions", + "authentication.permissions.remove_others_reactions.name", + "authentication.permissions.remove_others_reactions.description", + PermissionScopeChannel, + } + // DEPRECATED + PermissionPermanentDeleteUser = &Permission{ + "permanent_delete_user", + "authentication.permissions.permanent_delete_user.name", + "authentication.permissions.permanent_delete_user.description", + PermissionScopeSystem, + } + PermissionUploadFile = &Permission{ + "upload_file", + "authentication.permissions.upload_file.name", + "authentication.permissions.upload_file.description", + PermissionScopeChannel, + } + PermissionGetPublicLink = &Permission{ + "get_public_link", + "authentication.permissions.get_public_link.name", + "authentication.permissions.get_public_link.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionManageWebhooks = &Permission{ + "manage_webhooks", + "authentication.permissions.manage_webhooks.name", + "authentication.permissions.manage_webhooks.description", + PermissionScopeTeam, + } + // DEPRECATED + PermissionManageOthersWebhooks = &Permission{ + "manage_others_webhooks", + "authentication.permissions.manage_others_webhooks.name", + "authentication.permissions.manage_others_webhooks.description", + PermissionScopeTeam, + } + PermissionManageIncomingWebhooks = 
&Permission{ + "manage_incoming_webhooks", + "authentication.permissions.manage_incoming_webhooks.name", + "authentication.permissions.manage_incoming_webhooks.description", + PermissionScopeTeam, + } + PermissionManageOutgoingWebhooks = &Permission{ + "manage_outgoing_webhooks", + "authentication.permissions.manage_outgoing_webhooks.name", + "authentication.permissions.manage_outgoing_webhooks.description", + PermissionScopeTeam, + } + PermissionManageOthersIncomingWebhooks = &Permission{ + "manage_others_incoming_webhooks", + "authentication.permissions.manage_others_incoming_webhooks.name", + "authentication.permissions.manage_others_incoming_webhooks.description", + PermissionScopeTeam, + } + PermissionManageOthersOutgoingWebhooks = &Permission{ + "manage_others_outgoing_webhooks", + "authentication.permissions.manage_others_outgoing_webhooks.name", + "authentication.permissions.manage_others_outgoing_webhooks.description", + PermissionScopeTeam, + } + PermissionManageOAuth = &Permission{ + "manage_oauth", + "authentication.permissions.manage_oauth.name", + "authentication.permissions.manage_oauth.description", + PermissionScopeSystem, + } + PermissionManageSystemWideOAuth = &Permission{ + "manage_system_wide_oauth", + "authentication.permissions.manage_system_wide_oauth.name", + "authentication.permissions.manage_system_wide_oauth.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionManageEmojis = &Permission{ + "manage_emojis", + "authentication.permissions.manage_emojis.name", + "authentication.permissions.manage_emojis.description", + PermissionScopeTeam, + } + // DEPRECATED + PermissionManageOthersEmojis = &Permission{ + "manage_others_emojis", + "authentication.permissions.manage_others_emojis.name", + "authentication.permissions.manage_others_emojis.description", + PermissionScopeTeam, + } + PermissionCreateEmojis = &Permission{ + "create_emojis", + "authentication.permissions.create_emojis.name", + "authentication.permissions.create_emojis.description", + PermissionScopeTeam, + } + PermissionDeleteEmojis = &Permission{ + "delete_emojis", + "authentication.permissions.delete_emojis.name", + "authentication.permissions.delete_emojis.description", + PermissionScopeTeam, + } + PermissionDeleteOthersEmojis = &Permission{ + "delete_others_emojis", + "authentication.permissions.delete_others_emojis.name", + "authentication.permissions.delete_others_emojis.description", + PermissionScopeTeam, + } + PermissionCreatePost = &Permission{ + "create_post", + "authentication.permissions.create_post.name", + "authentication.permissions.create_post.description", + PermissionScopeChannel, + } + PermissionCreatePostPublic = &Permission{ + "create_post_public", + "authentication.permissions.create_post_public.name", + "authentication.permissions.create_post_public.description", + PermissionScopeChannel, + } + PermissionCreatePostEphemeral = &Permission{ + "create_post_ephemeral", + "authentication.permissions.create_post_ephemeral.name", + "authentication.permissions.create_post_ephemeral.description", + PermissionScopeChannel, + } + PermissionEditPost = &Permission{ + "edit_post", + "authentication.permissions.edit_post.name", + "authentication.permissions.edit_post.description", + PermissionScopeChannel, + } + PermissionEditOthersPosts = &Permission{ + "edit_others_posts", + "authentication.permissions.edit_others_posts.name", + "authentication.permissions.edit_others_posts.description", + PermissionScopeChannel, + } + PermissionDeletePost = &Permission{ + "delete_post", + 
"authentication.permissions.delete_post.name", + "authentication.permissions.delete_post.description", + PermissionScopeChannel, + } + PermissionDeleteOthersPosts = &Permission{ + "delete_others_posts", + "authentication.permissions.delete_others_posts.name", + "authentication.permissions.delete_others_posts.description", + PermissionScopeChannel, + } + PermissionManageSharedChannels = &Permission{ + "manage_shared_channels", + "authentication.permissions.manage_shared_channels.name", + "authentication.permissions.manage_shared_channels.description", + PermissionScopeSystem, + } + PermissionManageSecureConnections = &Permission{ + "manage_secure_connections", + "authentication.permissions.manage_secure_connections.name", + "authentication.permissions.manage_secure_connections.description", + PermissionScopeSystem, + } + + PermissionCreateDataRetentionJob = &Permission{ + "create_data_retention_job", + "", + "", + PermissionScopeSystem, + } + PermissionReadDataRetentionJob = &Permission{ + "read_data_retention_job", + "", + "", + PermissionScopeSystem, + } + + PermissionCreateComplianceExportJob = &Permission{ + "create_compliance_export_job", + "", + "", + PermissionScopeSystem, + } + PermissionReadComplianceExportJob = &Permission{ + "read_compliance_export_job", + "", + "", + PermissionScopeSystem, + } + + PermissionReadAudits = &Permission{ + "read_audits", + "", + "", + PermissionScopeSystem, + } + + PermissionPurgeBleveIndexes = &Permission{ + "purge_bleve_indexes", + "", + "", + PermissionScopeSystem, + } + + PermissionCreatePostBleveIndexesJob = &Permission{ + "create_post_bleve_indexes_job", + "", + "", + PermissionScopeSystem, + } + + PermissionCreateLdapSyncJob = &Permission{ + "create_ldap_sync_job", + "", + "", + PermissionScopeSystem, + } + PermissionReadLdapSyncJob = &Permission{ + "read_ldap_sync_job", + "", + "", + PermissionScopeSystem, + } + + PermissionTestLdap = &Permission{ + "test_ldap", + "", + "", + PermissionScopeSystem, + } + + PermissionInvalidateEmailInvite = &Permission{ + "invalidate_email_invite", + "", + "", + PermissionScopeSystem, + } + PermissionGetSamlMetadataFromIdp = &Permission{ + "get_saml_metadata_from_idp", + "", + "", + PermissionScopeSystem, + } + PermissionAddSamlPublicCert = &Permission{ + "add_saml_public_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionAddSamlPrivateCert = &Permission{ + "add_saml_private_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionAddSamlIdpCert = &Permission{ + "add_saml_idp_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveSamlPublicCert = &Permission{ + "remove_saml_public_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveSamlPrivateCert = &Permission{ + "remove_saml_private_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveSamlIdpCert = &Permission{ + "remove_saml_idp_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionGetSamlCertStatus = &Permission{ + "get_saml_cert_status", + "", + "", + PermissionScopeSystem, + } + + PermissionAddLdapPublicCert = &Permission{ + "add_ldap_public_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionAddLdapPrivateCert = &Permission{ + "add_ldap_private_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveLdapPublicCert = &Permission{ + "remove_ldap_public_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveLdapPrivateCert = &Permission{ + "remove_ldap_private_cert", + "", + "", + PermissionScopeSystem, + } + + PermissionGetLogs = &Permission{ + 
"get_logs", + "", + "", + PermissionScopeSystem, + } + + PermissionReadLicenseInformation = &Permission{ + "read_license_information", + "", + "", + PermissionScopeSystem, + } + + PermissionGetAnalytics = &Permission{ + "get_analytics", + "", + "", + PermissionScopeSystem, + } + + PermissionManageLicenseInformation = &Permission{ + "manage_license_information", + "", + "", + PermissionScopeSystem, + } + + PermissionDownloadComplianceExportResult = &Permission{ + "download_compliance_export_result", + "authentication.permissions.download_compliance_export_result.name", + "authentication.permissions.download_compliance_export_result.description", + PermissionScopeSystem, + } + + PermissionTestSiteURL = &Permission{ + "test_site_url", + "", + "", + PermissionScopeSystem, + } + PermissionTestElasticsearch = &Permission{ + "test_elasticsearch", + "", + "", + PermissionScopeSystem, + } + PermissionTestS3 = &Permission{ + "test_s3", + "", + "", + PermissionScopeSystem, + } + PermissionReloadConfig = &Permission{ + "reload_config", + "", + "", + PermissionScopeSystem, + } + PermissionInvalidateCaches = &Permission{ + "invalidate_caches", + "", + "", + PermissionScopeSystem, + } + PermissionRecycleDatabaseConnections = &Permission{ + "recycle_database_connections", + "", + "", + PermissionScopeSystem, + } + PermissionPurgeElasticsearchIndexes = &Permission{ + "purge_elasticsearch_indexes", + "", + "", + PermissionScopeSystem, + } + PermissionTestEmail = &Permission{ + "test_email", + "", + "", + PermissionScopeSystem, + } + PermissionCreateElasticsearchPostIndexingJob = &Permission{ + "create_elasticsearch_post_indexing_job", + "", + "", + PermissionScopeSystem, + } + PermissionCreateElasticsearchPostAggregationJob = &Permission{ + "create_elasticsearch_post_aggregation_job", + "", + "", + PermissionScopeSystem, + } + PermissionReadElasticsearchPostIndexingJob = &Permission{ + "read_elasticsearch_post_indexing_job", + "", + "", + PermissionScopeSystem, + } + PermissionReadElasticsearchPostAggregationJob = &Permission{ + "read_elasticsearch_post_aggregation_job", + "", + "", + PermissionScopeSystem, + } + + PermissionRemoveUserFromTeam = &Permission{ + "remove_user_from_team", + "authentication.permissions.remove_user_from_team.name", + "authentication.permissions.remove_user_from_team.description", + PermissionScopeTeam, + } + PermissionCreateTeam = &Permission{ + "create_team", + "authentication.permissions.create_team.name", + "authentication.permissions.create_team.description", + PermissionScopeSystem, + } + PermissionManageTeam = &Permission{ + "manage_team", + "authentication.permissions.manage_team.name", + "authentication.permissions.manage_team.description", + PermissionScopeTeam, + } + PermissionImportTeam = &Permission{ + "import_team", + "authentication.permissions.import_team.name", + "authentication.permissions.import_team.description", + PermissionScopeTeam, + } + PermissionViewTeam = &Permission{ + "view_team", + "authentication.permissions.view_team.name", + "authentication.permissions.view_team.description", + PermissionScopeTeam, + } + PermissionListUsersWithoutTeam = &Permission{ + "list_users_without_team", + "authentication.permissions.list_users_without_team.name", + "authentication.permissions.list_users_without_team.description", + PermissionScopeSystem, + } + PermissionCreateUserAccessToken = &Permission{ + "create_user_access_token", + "authentication.permissions.create_user_access_token.name", + "authentication.permissions.create_user_access_token.description", + 
PermissionScopeSystem, + } + PermissionReadUserAccessToken = &Permission{ + "read_user_access_token", + "authentication.permissions.read_user_access_token.name", + "authentication.permissions.read_user_access_token.description", + PermissionScopeSystem, + } + PermissionRevokeUserAccessToken = &Permission{ + "revoke_user_access_token", + "authentication.permissions.revoke_user_access_token.name", + "authentication.permissions.revoke_user_access_token.description", + PermissionScopeSystem, + } + PermissionCreateBot = &Permission{ + "create_bot", + "authentication.permissions.create_bot.name", + "authentication.permissions.create_bot.description", + PermissionScopeSystem, + } + PermissionAssignBot = &Permission{ + "assign_bot", + "authentication.permissions.assign_bot.name", + "authentication.permissions.assign_bot.description", + PermissionScopeSystem, + } + PermissionReadBots = &Permission{ + "read_bots", + "authentication.permissions.read_bots.name", + "authentication.permissions.read_bots.description", + PermissionScopeSystem, + } + PermissionReadOthersBots = &Permission{ + "read_others_bots", + "authentication.permissions.read_others_bots.name", + "authentication.permissions.read_others_bots.description", + PermissionScopeSystem, + } + PermissionManageBots = &Permission{ + "manage_bots", + "authentication.permissions.manage_bots.name", + "authentication.permissions.manage_bots.description", + PermissionScopeSystem, + } + PermissionManageOthersBots = &Permission{ + "manage_others_bots", + "authentication.permissions.manage_others_bots.name", + "authentication.permissions.manage_others_bots.description", + PermissionScopeSystem, + } + PermissionReadJobs = &Permission{ + "read_jobs", + "authentication.permisssions.read_jobs.name", + "authentication.permisssions.read_jobs.description", + PermissionScopeSystem, + } + PermissionManageJobs = &Permission{ + "manage_jobs", + "authentication.permisssions.manage_jobs.name", + "authentication.permisssions.manage_jobs.description", + PermissionScopeSystem, + } + PermissionViewMembers = &Permission{ + "view_members", + "authentication.permisssions.view_members.name", + "authentication.permisssions.view_members.description", + PermissionScopeTeam, + } + PermissionInviteGuest = &Permission{ + "invite_guest", + "authentication.permissions.invite_guest.name", + "authentication.permissions.invite_guest.description", + PermissionScopeTeam, + } + PermissionPromoteGuest = &Permission{ + "promote_guest", + "authentication.permissions.promote_guest.name", + "authentication.permissions.promote_guest.description", + PermissionScopeSystem, + } + PermissionDemoteToGuest = &Permission{ + "demote_to_guest", + "authentication.permissions.demote_to_guest.name", + "authentication.permissions.demote_to_guest.description", + PermissionScopeSystem, + } + PermissionUseChannelMentions = &Permission{ + "use_channel_mentions", + "authentication.permissions.use_channel_mentions.name", + "authentication.permissions.use_channel_mentions.description", + PermissionScopeChannel, + } + PermissionUseGroupMentions = &Permission{ + "use_group_mentions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeChannel, + } + PermissionReadOtherUsersTeams = &Permission{ + "read_other_users_teams", + "authentication.permissions.read_other_users_teams.name", + "authentication.permissions.read_other_users_teams.description", + PermissionScopeSystem, + } + PermissionEditBrand = &Permission{ + "edit_brand", + 
"authentication.permissions.edit_brand.name", + "authentication.permissions.edit_brand.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadAbout = &Permission{ + "sysconsole_read_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteAbout = &Permission{ + "sysconsole_write_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadAboutEditionAndLicense = &Permission{ + "sysconsole_read_about_edition_and_license", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAboutEditionAndLicense = &Permission{ + "sysconsole_write_about_edition_and_license", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadBilling = &Permission{ + "sysconsole_read_billing", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteBilling = &Permission{ + "sysconsole_write_billing", + "", + "", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadReporting = &Permission{ + "sysconsole_read_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteReporting = &Permission{ + "sysconsole_write_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadReportingSiteStatistics = &Permission{ + "sysconsole_read_reporting_site_statistics", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteReportingSiteStatistics = &Permission{ + "sysconsole_write_reporting_site_statistics", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadReportingTeamStatistics = &Permission{ + "sysconsole_read_reporting_team_statistics", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteReportingTeamStatistics = &Permission{ + "sysconsole_write_reporting_team_statistics", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadReportingServerLogs = &Permission{ + "sysconsole_read_reporting_server_logs", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteReportingServerLogs = &Permission{ + "sysconsole_write_reporting_server_logs", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementUsers = &Permission{ + "sysconsole_read_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementUsers = &Permission{ + "sysconsole_write_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementGroups = &Permission{ + "sysconsole_read_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementGroups = &Permission{ + "sysconsole_write_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + 
PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementTeams = &Permission{ + "sysconsole_read_user_management_teams", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementTeams = &Permission{ + "sysconsole_write_user_management_teams", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementChannels = &Permission{ + "sysconsole_read_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementChannels = &Permission{ + "sysconsole_write_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementPermissions = &Permission{ + "sysconsole_read_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementPermissions = &Permission{ + "sysconsole_write_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadUserManagementSystemRoles = &Permission{ + "sysconsole_read_user_management_system_roles", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWriteUserManagementSystemRoles = &Permission{ + "sysconsole_write_user_management_system_roles", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadEnvironment = &Permission{ + "sysconsole_read_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteEnvironment = &Permission{ + "sysconsole_write_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentWebServer = &Permission{ + "sysconsole_read_environment_web_server", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentWebServer = &Permission{ + "sysconsole_write_environment_web_server", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentDatabase = &Permission{ + "sysconsole_read_environment_database", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentDatabase = &Permission{ + "sysconsole_write_environment_database", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentElasticsearch = &Permission{ + "sysconsole_read_environment_elasticsearch", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentElasticsearch = &Permission{ + "sysconsole_write_environment_elasticsearch", + "", + "", + PermissionScopeSystem, + } + 
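// ---------------------------------------------------------------------------
// Editor's sketch, not part of this patch: every Permission literal above
// follows the same four positional fields (Id, i18n name key, i18n
// description key, scope). The scope constant (PermissionScopeSystem,
// PermissionScopeTeam, PermissionScopeChannel, PermissionScopeGroup,
// PermissionScopePlaybook, PermissionScopeRun) decides at which level the
// server evaluates the permission. A minimal lookup over the AllPermissions
// slice assembled later in this file could look as follows; GetPermissionByID
// is a hypothetical helper for illustration, not an API exported by
// mattermost-server.
func GetPermissionByID(id string) *Permission {
	for _, p := range AllPermissions {
		if p.Id == id {
			return p
		}
	}
	// Deprecated permissions live in DeprecatedPermissions, which is not
	// appended to AllPermissions, so they (and unknown ids) return nil here.
	return nil
}

// Example: GetPermissionByID("manage_shared_channels") would return the
// PermissionManageSharedChannels value declared above, whose Scope is
// PermissionScopeSystem.
// ---------------------------------------------------------------------------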
PermissionSysconsoleReadEnvironmentFileStorage = &Permission{ + "sysconsole_read_environment_file_storage", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentFileStorage = &Permission{ + "sysconsole_write_environment_file_storage", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentImageProxy = &Permission{ + "sysconsole_read_environment_image_proxy", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentImageProxy = &Permission{ + "sysconsole_write_environment_image_proxy", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentSMTP = &Permission{ + "sysconsole_read_environment_smtp", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentSMTP = &Permission{ + "sysconsole_write_environment_smtp", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentPushNotificationServer = &Permission{ + "sysconsole_read_environment_push_notification_server", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentPushNotificationServer = &Permission{ + "sysconsole_write_environment_push_notification_server", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentHighAvailability = &Permission{ + "sysconsole_read_environment_high_availability", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentHighAvailability = &Permission{ + "sysconsole_write_environment_high_availability", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentRateLimiting = &Permission{ + "sysconsole_read_environment_rate_limiting", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentRateLimiting = &Permission{ + "sysconsole_write_environment_rate_limiting", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentLogging = &Permission{ + "sysconsole_read_environment_logging", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentLogging = &Permission{ + "sysconsole_write_environment_logging", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentSessionLengths = &Permission{ + "sysconsole_read_environment_session_lengths", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentSessionLengths = &Permission{ + "sysconsole_write_environment_session_lengths", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentPerformanceMonitoring = &Permission{ + "sysconsole_read_environment_performance_monitoring", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentPerformanceMonitoring = &Permission{ + "sysconsole_write_environment_performance_monitoring", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadEnvironmentDeveloper = &Permission{ + "sysconsole_read_environment_developer", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteEnvironmentDeveloper = &Permission{ + "sysconsole_write_environment_developer", + "", + "", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadSite = &Permission{ + "sysconsole_read_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteSite = &Permission{ + "sysconsole_write_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + 
PermissionScopeSystem, + } + + PermissionSysconsoleReadSiteCustomization = &Permission{ + "sysconsole_read_site_customization", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteCustomization = &Permission{ + "sysconsole_write_site_customization", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteLocalization = &Permission{ + "sysconsole_read_site_localization", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteLocalization = &Permission{ + "sysconsole_write_site_localization", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteUsersAndTeams = &Permission{ + "sysconsole_read_site_users_and_teams", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteUsersAndTeams = &Permission{ + "sysconsole_write_site_users_and_teams", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteNotifications = &Permission{ + "sysconsole_read_site_notifications", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteNotifications = &Permission{ + "sysconsole_write_site_notifications", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteAnnouncementBanner = &Permission{ + "sysconsole_read_site_announcement_banner", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteAnnouncementBanner = &Permission{ + "sysconsole_write_site_announcement_banner", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteEmoji = &Permission{ + "sysconsole_read_site_emoji", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteEmoji = &Permission{ + "sysconsole_write_site_emoji", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSitePosts = &Permission{ + "sysconsole_read_site_posts", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSitePosts = &Permission{ + "sysconsole_write_site_posts", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteFileSharingAndDownloads = &Permission{ + "sysconsole_read_site_file_sharing_and_downloads", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteFileSharingAndDownloads = &Permission{ + "sysconsole_write_site_file_sharing_and_downloads", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSitePublicLinks = &Permission{ + "sysconsole_read_site_public_links", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSitePublicLinks = &Permission{ + "sysconsole_write_site_public_links", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadSiteNotices = &Permission{ + "sysconsole_read_site_notices", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteSiteNotices = &Permission{ + "sysconsole_write_site_notices", + "", + "", + PermissionScopeSystem, + } + + // Deprecated + PermissionSysconsoleReadAuthentication = &Permission{ + "sysconsole_read_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // Deprecated + PermissionSysconsoleWriteAuthentication = &Permission{ + "sysconsole_write_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationSignup = &Permission{ + "sysconsole_read_authentication_signup", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationSignup = &Permission{ + 
"sysconsole_write_authentication_signup", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationEmail = &Permission{ + "sysconsole_read_authentication_email", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationEmail = &Permission{ + "sysconsole_write_authentication_email", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationPassword = &Permission{ + "sysconsole_read_authentication_password", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationPassword = &Permission{ + "sysconsole_write_authentication_password", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationMfa = &Permission{ + "sysconsole_read_authentication_mfa", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationMfa = &Permission{ + "sysconsole_write_authentication_mfa", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationLdap = &Permission{ + "sysconsole_read_authentication_ldap", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationLdap = &Permission{ + "sysconsole_write_authentication_ldap", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationSaml = &Permission{ + "sysconsole_read_authentication_saml", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationSaml = &Permission{ + "sysconsole_write_authentication_saml", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationOpenid = &Permission{ + "sysconsole_read_authentication_openid", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationOpenid = &Permission{ + "sysconsole_write_authentication_openid", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadAuthenticationGuestAccess = &Permission{ + "sysconsole_read_authentication_guest_access", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteAuthenticationGuestAccess = &Permission{ + "sysconsole_write_authentication_guest_access", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadPlugins = &Permission{ + "sysconsole_read_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleWritePlugins = &Permission{ + "sysconsole_write_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadIntegrations = &Permission{ + "sysconsole_read_integrations", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteIntegrations = &Permission{ + "sysconsole_write_integrations", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadIntegrationsIntegrationManagement = &Permission{ + "sysconsole_read_integrations_integration_management", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteIntegrationsIntegrationManagement = &Permission{ + "sysconsole_write_integrations_integration_management", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadIntegrationsBotAccounts = &Permission{ + 
"sysconsole_read_integrations_bot_accounts", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteIntegrationsBotAccounts = &Permission{ + "sysconsole_write_integrations_bot_accounts", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadIntegrationsGif = &Permission{ + "sysconsole_read_integrations_gif", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteIntegrationsGif = &Permission{ + "sysconsole_write_integrations_gif", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadIntegrationsCors = &Permission{ + "sysconsole_read_integrations_cors", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteIntegrationsCors = &Permission{ + "sysconsole_write_integrations_cors", + "", + "", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadCompliance = &Permission{ + "sysconsole_read_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteCompliance = &Permission{ + "sysconsole_write_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadComplianceDataRetentionPolicy = &Permission{ + "sysconsole_read_compliance_data_retention_policy", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteComplianceDataRetentionPolicy = &Permission{ + "sysconsole_write_compliance_data_retention_policy", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadComplianceComplianceExport = &Permission{ + "sysconsole_read_compliance_compliance_export", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteComplianceComplianceExport = &Permission{ + "sysconsole_write_compliance_compliance_export", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadComplianceComplianceMonitoring = &Permission{ + "sysconsole_read_compliance_compliance_monitoring", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteComplianceComplianceMonitoring = &Permission{ + "sysconsole_write_compliance_compliance_monitoring", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadComplianceCustomTermsOfService = &Permission{ + "sysconsole_read_compliance_custom_terms_of_service", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteComplianceCustomTermsOfService = &Permission{ + "sysconsole_write_compliance_custom_terms_of_service", + "", + "", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleReadExperimental = &Permission{ + "sysconsole_read_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + // DEPRECATED + PermissionSysconsoleWriteExperimental = &Permission{ + "sysconsole_write_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PermissionSysconsoleReadExperimentalFeatures = &Permission{ + "sysconsole_read_experimental_features", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteExperimentalFeatures = &Permission{ + "sysconsole_write_experimental_features", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadExperimentalFeatureFlags = &Permission{ + "sysconsole_read_experimental_feature_flags", + "", + "", + 
PermissionScopeSystem, + } + PermissionSysconsoleWriteExperimentalFeatureFlags = &Permission{ + "sysconsole_write_experimental_feature_flags", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleReadExperimentalBleve = &Permission{ + "sysconsole_read_experimental_bleve", + "", + "", + PermissionScopeSystem, + } + PermissionSysconsoleWriteExperimentalBleve = &Permission{ + "sysconsole_write_experimental_bleve", + "", + "", + PermissionScopeSystem, + } + + PermissionCreateCustomGroup = &Permission{ + "create_custom_group", + "authentication.permissions.create_custom_group.name", + "authentication.permissions.create_custom_group.description", + PermissionScopeSystem, + } + + PermissionManageCustomGroupMembers = &Permission{ + "manage_custom_group_members", + "authentication.permissions.manage_custom_group_members.name", + "authentication.permissions.manage_custom_group_members.description", + PermissionScopeGroup, + } + + PermissionEditCustomGroup = &Permission{ + "edit_custom_group", + "authentication.permissions.edit_custom_group.name", + "authentication.permissions.edit_custom_group.description", + PermissionScopeGroup, + } + + PermissionDeleteCustomGroup = &Permission{ + "delete_custom_group", + "authentication.permissions.delete_custom_group.name", + "authentication.permissions.delete_custom_group.description", + PermissionScopeGroup, + } + + // Playbooks + PermissionPublicPlaybookCreate = &Permission{ + "playbook_public_create", + "", + "", + PermissionScopeTeam, + } + + PermissionPublicPlaybookManageProperties = &Permission{ + "playbook_public_manage_properties", + "", + "", + PermissionScopePlaybook, + } + + PermissionPublicPlaybookManageMembers = &Permission{ + "playbook_public_manage_members", + "", + "", + PermissionScopePlaybook, + } + + PermissionPublicPlaybookManageRoles = &Permission{ + "playbook_public_manage_roles", + "", + "", + PermissionScopePlaybook, + } + + PermissionPublicPlaybookView = &Permission{ + "playbook_public_view", + "", + "", + PermissionScopePlaybook, + } + + PermissionPublicPlaybookMakePrivate = &Permission{ + "playbook_public_make_private", + "", + "", + PermissionScopePlaybook, + } + + PermissionPrivatePlaybookCreate = &Permission{ + "playbook_private_create", + "", + "", + PermissionScopeTeam, + } + + PermissionPrivatePlaybookManageProperties = &Permission{ + "playbook_private_manage_properties", + "", + "", + PermissionScopePlaybook, + } + + PermissionPrivatePlaybookManageMembers = &Permission{ + "playbook_private_manage_members", + "", + "", + PermissionScopePlaybook, + } + + PermissionPrivatePlaybookManageRoles = &Permission{ + "playbook_private_manage_roles", + "", + "", + PermissionScopePlaybook, + } + + PermissionPrivatePlaybookView = &Permission{ + "playbook_private_view", + "", + "", + PermissionScopePlaybook, + } + + PermissionPrivatePlaybookMakePublic = &Permission{ + "playbook_private_make_public", + "", + "", + PermissionScopePlaybook, + } + + PermissionRunCreate = &Permission{ + "run_create", + "", + "", + PermissionScopePlaybook, + } + + PermissionRunManageProperties = &Permission{ + "run_manage_properties", + "", + "", + PermissionScopeRun, + } + + PermissionRunManageMembers = &Permission{ + "run_manage_members", + "", + "", + PermissionScopeRun, + } + + PermissionRunView = &Permission{ + "run_view", + "", + "", + PermissionScopeRun, + } + + SysconsoleReadPermissions = []*Permission{ + PermissionSysconsoleReadAboutEditionAndLicense, + PermissionSysconsoleReadBilling, + PermissionSysconsoleReadReportingSiteStatistics, + 
PermissionSysconsoleReadReportingTeamStatistics, + PermissionSysconsoleReadReportingServerLogs, + PermissionSysconsoleReadUserManagementUsers, + PermissionSysconsoleReadUserManagementGroups, + PermissionSysconsoleReadUserManagementTeams, + PermissionSysconsoleReadUserManagementChannels, + PermissionSysconsoleReadUserManagementPermissions, + PermissionSysconsoleReadUserManagementSystemRoles, + PermissionSysconsoleReadEnvironmentWebServer, + PermissionSysconsoleReadEnvironmentDatabase, + PermissionSysconsoleReadEnvironmentElasticsearch, + PermissionSysconsoleReadEnvironmentFileStorage, + PermissionSysconsoleReadEnvironmentImageProxy, + PermissionSysconsoleReadEnvironmentSMTP, + PermissionSysconsoleReadEnvironmentPushNotificationServer, + PermissionSysconsoleReadEnvironmentHighAvailability, + PermissionSysconsoleReadEnvironmentRateLimiting, + PermissionSysconsoleReadEnvironmentLogging, + PermissionSysconsoleReadEnvironmentSessionLengths, + PermissionSysconsoleReadEnvironmentPerformanceMonitoring, + PermissionSysconsoleReadEnvironmentDeveloper, + PermissionSysconsoleReadSiteCustomization, + PermissionSysconsoleReadSiteLocalization, + PermissionSysconsoleReadSiteUsersAndTeams, + PermissionSysconsoleReadSiteNotifications, + PermissionSysconsoleReadSiteAnnouncementBanner, + PermissionSysconsoleReadSiteEmoji, + PermissionSysconsoleReadSitePosts, + PermissionSysconsoleReadSiteFileSharingAndDownloads, + PermissionSysconsoleReadSitePublicLinks, + PermissionSysconsoleReadSiteNotices, + PermissionSysconsoleReadAuthenticationSignup, + PermissionSysconsoleReadAuthenticationEmail, + PermissionSysconsoleReadAuthenticationPassword, + PermissionSysconsoleReadAuthenticationMfa, + PermissionSysconsoleReadAuthenticationLdap, + PermissionSysconsoleReadAuthenticationSaml, + PermissionSysconsoleReadAuthenticationOpenid, + PermissionSysconsoleReadAuthenticationGuestAccess, + PermissionSysconsoleReadPlugins, + PermissionSysconsoleReadIntegrationsIntegrationManagement, + PermissionSysconsoleReadIntegrationsBotAccounts, + PermissionSysconsoleReadIntegrationsGif, + PermissionSysconsoleReadIntegrationsCors, + PermissionSysconsoleReadComplianceDataRetentionPolicy, + PermissionSysconsoleReadComplianceComplianceExport, + PermissionSysconsoleReadComplianceComplianceMonitoring, + PermissionSysconsoleReadComplianceCustomTermsOfService, + PermissionSysconsoleReadExperimentalFeatures, + PermissionSysconsoleReadExperimentalFeatureFlags, + PermissionSysconsoleReadExperimentalBleve, + } + + SysconsoleWritePermissions = []*Permission{ + PermissionSysconsoleWriteAboutEditionAndLicense, + PermissionSysconsoleWriteBilling, + PermissionSysconsoleWriteReportingSiteStatistics, + PermissionSysconsoleWriteReportingTeamStatistics, + PermissionSysconsoleWriteReportingServerLogs, + PermissionSysconsoleWriteUserManagementUsers, + PermissionSysconsoleWriteUserManagementGroups, + PermissionSysconsoleWriteUserManagementTeams, + PermissionSysconsoleWriteUserManagementChannels, + PermissionSysconsoleWriteUserManagementPermissions, + PermissionSysconsoleWriteUserManagementSystemRoles, + PermissionSysconsoleWriteEnvironmentWebServer, + PermissionSysconsoleWriteEnvironmentDatabase, + PermissionSysconsoleWriteEnvironmentElasticsearch, + PermissionSysconsoleWriteEnvironmentFileStorage, + PermissionSysconsoleWriteEnvironmentImageProxy, + PermissionSysconsoleWriteEnvironmentSMTP, + PermissionSysconsoleWriteEnvironmentPushNotificationServer, + PermissionSysconsoleWriteEnvironmentHighAvailability, + PermissionSysconsoleWriteEnvironmentRateLimiting, + 
PermissionSysconsoleWriteEnvironmentLogging, + PermissionSysconsoleWriteEnvironmentSessionLengths, + PermissionSysconsoleWriteEnvironmentPerformanceMonitoring, + PermissionSysconsoleWriteEnvironmentDeveloper, + PermissionSysconsoleWriteSiteCustomization, + PermissionSysconsoleWriteSiteLocalization, + PermissionSysconsoleWriteSiteUsersAndTeams, + PermissionSysconsoleWriteSiteNotifications, + PermissionSysconsoleWriteSiteAnnouncementBanner, + PermissionSysconsoleWriteSiteEmoji, + PermissionSysconsoleWriteSitePosts, + PermissionSysconsoleWriteSiteFileSharingAndDownloads, + PermissionSysconsoleWriteSitePublicLinks, + PermissionSysconsoleWriteSiteNotices, + PermissionSysconsoleWriteAuthenticationSignup, + PermissionSysconsoleWriteAuthenticationEmail, + PermissionSysconsoleWriteAuthenticationPassword, + PermissionSysconsoleWriteAuthenticationMfa, + PermissionSysconsoleWriteAuthenticationLdap, + PermissionSysconsoleWriteAuthenticationSaml, + PermissionSysconsoleWriteAuthenticationOpenid, + PermissionSysconsoleWriteAuthenticationGuestAccess, + PermissionSysconsoleWritePlugins, + PermissionSysconsoleWriteIntegrationsIntegrationManagement, + PermissionSysconsoleWriteIntegrationsBotAccounts, + PermissionSysconsoleWriteIntegrationsGif, + PermissionSysconsoleWriteIntegrationsCors, + PermissionSysconsoleWriteComplianceDataRetentionPolicy, + PermissionSysconsoleWriteComplianceComplianceExport, + PermissionSysconsoleWriteComplianceComplianceMonitoring, + PermissionSysconsoleWriteComplianceCustomTermsOfService, + PermissionSysconsoleWriteExperimentalFeatures, + PermissionSysconsoleWriteExperimentalFeatureFlags, + PermissionSysconsoleWriteExperimentalBleve, + } + + SystemScopedPermissionsMinusSysconsole := []*Permission{ + PermissionAssignSystemAdminRole, + PermissionManageRoles, + PermissionManageSystem, + PermissionCreateDirectChannel, + PermissionCreateGroupChannel, + PermissionListPublicTeams, + PermissionJoinPublicTeams, + PermissionListPrivateTeams, + PermissionJoinPrivateTeams, + PermissionEditOtherUsers, + PermissionReadOtherUsersTeams, + PermissionGetPublicLink, + PermissionManageOAuth, + PermissionManageSystemWideOAuth, + PermissionCreateTeam, + PermissionListUsersWithoutTeam, + PermissionCreateUserAccessToken, + PermissionReadUserAccessToken, + PermissionRevokeUserAccessToken, + PermissionCreateBot, + PermissionAssignBot, + PermissionReadBots, + PermissionReadOthersBots, + PermissionManageBots, + PermissionManageOthersBots, + PermissionReadJobs, + PermissionManageJobs, + PermissionPromoteGuest, + PermissionDemoteToGuest, + PermissionEditBrand, + PermissionManageSharedChannels, + PermissionManageSecureConnections, + PermissionDownloadComplianceExportResult, + PermissionCreateDataRetentionJob, + PermissionReadDataRetentionJob, + PermissionCreateComplianceExportJob, + PermissionReadComplianceExportJob, + PermissionReadAudits, + PermissionTestSiteURL, + PermissionTestElasticsearch, + PermissionTestS3, + PermissionReloadConfig, + PermissionInvalidateCaches, + PermissionRecycleDatabaseConnections, + PermissionPurgeElasticsearchIndexes, + PermissionTestEmail, + PermissionCreateElasticsearchPostIndexingJob, + PermissionCreateElasticsearchPostAggregationJob, + PermissionReadElasticsearchPostIndexingJob, + PermissionReadElasticsearchPostAggregationJob, + PermissionPurgeBleveIndexes, + PermissionCreatePostBleveIndexesJob, + PermissionCreateLdapSyncJob, + PermissionReadLdapSyncJob, + PermissionTestLdap, + PermissionInvalidateEmailInvite, + PermissionGetSamlMetadataFromIdp, + PermissionAddSamlPublicCert, + 
PermissionAddSamlPrivateCert, + PermissionAddSamlIdpCert, + PermissionRemoveSamlPublicCert, + PermissionRemoveSamlPrivateCert, + PermissionRemoveSamlIdpCert, + PermissionGetSamlCertStatus, + PermissionAddLdapPublicCert, + PermissionAddLdapPrivateCert, + PermissionRemoveLdapPublicCert, + PermissionRemoveLdapPrivateCert, + PermissionGetAnalytics, + PermissionGetLogs, + PermissionReadLicenseInformation, + PermissionManageLicenseInformation, + PermissionCreateCustomGroup, + } + + TeamScopedPermissions := []*Permission{ + PermissionInviteUser, + PermissionAddUserToTeam, + PermissionManageSlashCommands, + PermissionManageOthersSlashCommands, + PermissionCreatePublicChannel, + PermissionCreatePrivateChannel, + PermissionManageTeamRoles, + PermissionListTeamChannels, + PermissionJoinPublicChannels, + PermissionReadPublicChannel, + PermissionManageIncomingWebhooks, + PermissionManageOutgoingWebhooks, + PermissionManageOthersIncomingWebhooks, + PermissionManageOthersOutgoingWebhooks, + PermissionCreateEmojis, + PermissionDeleteEmojis, + PermissionDeleteOthersEmojis, + PermissionRemoveUserFromTeam, + PermissionManageTeam, + PermissionImportTeam, + PermissionViewTeam, + PermissionViewMembers, + PermissionInviteGuest, + PermissionPublicPlaybookCreate, + PermissionPrivatePlaybookCreate, + } + + ChannelScopedPermissions := []*Permission{ + PermissionUseSlashCommands, + PermissionManagePublicChannelMembers, + PermissionManagePrivateChannelMembers, + PermissionManageChannelRoles, + PermissionManagePublicChannelProperties, + PermissionManagePrivateChannelProperties, + PermissionConvertPublicChannelToPrivate, + PermissionConvertPrivateChannelToPublic, + PermissionDeletePublicChannel, + PermissionDeletePrivateChannel, + PermissionReadChannel, + PermissionReadPublicChannelGroups, + PermissionReadPrivateChannelGroups, + PermissionAddReaction, + PermissionRemoveReaction, + PermissionRemoveOthersReactions, + PermissionUploadFile, + PermissionCreatePost, + PermissionCreatePostPublic, + PermissionCreatePostEphemeral, + PermissionEditPost, + PermissionEditOthersPosts, + PermissionDeletePost, + PermissionDeleteOthersPosts, + PermissionUseChannelMentions, + PermissionUseGroupMentions, + } + + GroupScopedPermissions := []*Permission{ + PermissionManageCustomGroupMembers, + PermissionEditCustomGroup, + PermissionDeleteCustomGroup, + } + + DeprecatedPermissions = []*Permission{ + PermissionPermanentDeleteUser, + PermissionManageWebhooks, + PermissionManageOthersWebhooks, + PermissionManageEmojis, + PermissionManageOthersEmojis, + PermissionSysconsoleReadAuthentication, + PermissionSysconsoleWriteAuthentication, + PermissionSysconsoleReadSite, + PermissionSysconsoleWriteSite, + PermissionSysconsoleReadEnvironment, + PermissionSysconsoleWriteEnvironment, + PermissionSysconsoleReadReporting, + PermissionSysconsoleWriteReporting, + PermissionSysconsoleReadAbout, + PermissionSysconsoleWriteAbout, + PermissionSysconsoleReadExperimental, + PermissionSysconsoleWriteExperimental, + PermissionSysconsoleReadIntegrations, + PermissionSysconsoleWriteIntegrations, + PermissionSysconsoleReadCompliance, + PermissionSysconsoleWriteCompliance, + } + + PlaybookScopedPermissions := []*Permission{ + PermissionPublicPlaybookManageProperties, + PermissionPublicPlaybookManageMembers, + PermissionPublicPlaybookManageRoles, + PermissionPublicPlaybookView, + PermissionPublicPlaybookMakePrivate, + PermissionPrivatePlaybookManageProperties, + PermissionPrivatePlaybookManageMembers, + PermissionPrivatePlaybookManageRoles, + 
PermissionPrivatePlaybookView, + PermissionPrivatePlaybookMakePublic, + PermissionRunCreate, + } + + RunScopedPermissions := []*Permission{ + PermissionRunManageProperties, + PermissionRunManageMembers, + PermissionRunView, + } + + AllPermissions = []*Permission{} + AllPermissions = append(AllPermissions, SystemScopedPermissionsMinusSysconsole...) + AllPermissions = append(AllPermissions, TeamScopedPermissions...) + AllPermissions = append(AllPermissions, ChannelScopedPermissions...) + AllPermissions = append(AllPermissions, SysconsoleReadPermissions...) + AllPermissions = append(AllPermissions, SysconsoleWritePermissions...) + AllPermissions = append(AllPermissions, GroupScopedPermissions...) + AllPermissions = append(AllPermissions, PlaybookScopedPermissions...) + AllPermissions = append(AllPermissions, RunScopedPermissions...) + + ChannelModeratedPermissions = []string{ + PermissionCreatePost.Id, + "create_reactions", + "manage_members", + PermissionUseChannelMentions.Id, + } + + ChannelModeratedPermissionsMap = map[string]string{ + PermissionCreatePost.Id: ChannelModeratedPermissions[0], + PermissionAddReaction.Id: ChannelModeratedPermissions[1], + PermissionRemoveReaction.Id: ChannelModeratedPermissions[1], + PermissionManagePublicChannelMembers.Id: ChannelModeratedPermissions[2], + PermissionManagePrivateChannelMembers.Id: ChannelModeratedPermissions[2], + PermissionUseChannelMentions.Id: ChannelModeratedPermissions[3], + } +} + +func init() { + initializePermissions() +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_cluster_event.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_cluster_event.go new file mode 100644 index 00000000..9e227447 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_cluster_event.go @@ -0,0 +1,28 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +const ( + PluginClusterEventSendTypeReliable = ClusterSendReliable + PluginClusterEventSendTypeBestEffort = ClusterSendBestEffort +) + +// PluginClusterEvent is used to allow intra-cluster plugin communication. +type PluginClusterEvent struct { + // Id is the unique identifier for the event. + Id string + // Data is the event payload. + Data []byte +} + +// PluginClusterEventSendOptions defines some properties that apply when sending +// plugin events across a cluster. +type PluginClusterEventSendOptions struct { + // SendType defines the type of communication channel used to send the event. + SendType string + // TargetId identifies the cluster node to which the event should be sent. + // It should match the cluster id of the receiving instance. + // If empty, the event gets broadcasted to all other nodes. + TargetId string +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_event_data.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_event_data.go new file mode 100644 index 00000000..1253533d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_event_data.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// PluginEventData used to notify peers about plugin changes. 
+type PluginEventData struct { + Id string `json:"id"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_key_value.go similarity index 57% rename from vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/plugin_key_value.go index cd5406ea..c615bcf7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_key_value.go @@ -9,8 +9,8 @@ import ( ) const ( - KEY_VALUE_PLUGIN_ID_MAX_RUNES = 190 - KEY_VALUE_KEY_MAX_RUNES = 50 + KeyValuePluginIdMaxRunes = 190 + KeyValueKeyMaxRunes = 150 ) type PluginKeyValue struct { @@ -21,12 +21,12 @@ type PluginKeyValue struct { } func (kv *PluginKeyValue) IsValid() *AppError { - if len(kv.PluginId) == 0 || utf8.RuneCountInString(kv.PluginId) > KEY_VALUE_PLUGIN_ID_MAX_RUNES { - return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.plugin_id.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + if kv.PluginId == "" || utf8.RuneCountInString(kv.PluginId) > KeyValuePluginIdMaxRunes { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.plugin_id.app_error", map[string]interface{}{"Max": KeyValueKeyMaxRunes, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) } - if len(kv.Key) == 0 || utf8.RuneCountInString(kv.Key) > KEY_VALUE_KEY_MAX_RUNES { - return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.key.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + if kv.Key == "" || utf8.RuneCountInString(kv.Key) > KeyValueKeyMaxRunes { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.key.app_error", map[string]interface{}{"Max": KeyValueKeyMaxRunes, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_kvset_options.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/plugin_kvset_options.go diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go new file mode 100644 index 00000000..186fd5bd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// OnInstallEvent is sent to the plugin when it gets installed. 
+type OnInstallEvent struct { + UserId string // The user who installed the plugin +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_status.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_status.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/v5/model/plugin_status.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/plugin_status.go index b4ba2e73..c206505b 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_status.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_status.go @@ -3,11 +3,6 @@ package model -import ( - "encoding/json" - "io" -) - const ( PluginStateNotRunning = 0 PluginStateStarting = 1 // unused by server @@ -29,14 +24,3 @@ type PluginStatus struct { } type PluginStatuses []*PluginStatus - -func (m *PluginStatuses) ToJson() string { - b, _ := json.Marshal(m) - return string(b) -} - -func PluginStatusesFromJson(data io.Reader) PluginStatuses { - var m PluginStatuses - json.NewDecoder(data).Decode(&m) - return m -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_valid.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_valid.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/plugin_valid.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/plugin_valid.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugins_response.go similarity index 51% rename from vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/plugins_response.go index 421ee2f5..5aed0b3c 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugins_response.go @@ -3,11 +3,6 @@ package model -import ( - "encoding/json" - "io" -) - type PluginInfo struct { Manifest } @@ -16,14 +11,3 @@ type PluginsResponse struct { Active []*PluginInfo `json:"active"` Inactive []*PluginInfo `json:"inactive"` } - -func (m *PluginsResponse) ToJson() string { - b, _ := json.Marshal(m) - return string(b) -} - -func PluginsResponseFromJson(data io.Reader) *PluginsResponse { - var m *PluginsResponse - json.NewDecoder(data).Decode(&m) - return m -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post.go similarity index 60% rename from vendor/github.com/mattermost/mattermost-server/v5/model/post.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/post.go index 7c27eca9..039f761e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post.go @@ -14,61 +14,63 @@ import ( "sync" "unicode/utf8" - "github.com/mattermost/mattermost-server/v5/utils/markdown" + "github.com/mattermost/mattermost-server/v6/shared/markdown" ) const ( - POST_SYSTEM_MESSAGE_PREFIX = "system_" - POST_DEFAULT = "" - POST_SLACK_ATTACHMENT = "slack_attachment" - POST_SYSTEM_GENERIC = "system_generic" - POST_JOIN_LEAVE = "system_join_leave" // Deprecated, use POST_JOIN_CHANNEL or POST_LEAVE_CHANNEL instead - POST_JOIN_CHANNEL = "system_join_channel" - POST_GUEST_JOIN_CHANNEL = "system_guest_join_channel" - POST_LEAVE_CHANNEL = "system_leave_channel" - POST_JOIN_TEAM 
= "system_join_team" - POST_LEAVE_TEAM = "system_leave_team" - POST_AUTO_RESPONDER = "system_auto_responder" - POST_ADD_REMOVE = "system_add_remove" // Deprecated, use POST_ADD_TO_CHANNEL or POST_REMOVE_FROM_CHANNEL instead - POST_ADD_TO_CHANNEL = "system_add_to_channel" - POST_ADD_GUEST_TO_CHANNEL = "system_add_guest_to_chan" - POST_REMOVE_FROM_CHANNEL = "system_remove_from_channel" - POST_MOVE_CHANNEL = "system_move_channel" - POST_ADD_TO_TEAM = "system_add_to_team" - POST_REMOVE_FROM_TEAM = "system_remove_from_team" - POST_HEADER_CHANGE = "system_header_change" - POST_DISPLAYNAME_CHANGE = "system_displayname_change" - POST_CONVERT_CHANNEL = "system_convert_channel" - POST_PURPOSE_CHANGE = "system_purpose_change" - POST_CHANNEL_DELETED = "system_channel_deleted" - POST_CHANNEL_RESTORED = "system_channel_restored" - POST_EPHEMERAL = "system_ephemeral" - POST_CHANGE_CHANNEL_PRIVACY = "system_change_chan_privacy" - POST_ADD_BOT_TEAMS_CHANNELS = "add_bot_teams_channels" - POST_FILEIDS_MAX_RUNES = 150 - POST_FILENAMES_MAX_RUNES = 4000 - POST_HASHTAGS_MAX_RUNES = 1000 - POST_MESSAGE_MAX_RUNES_V1 = 4000 - POST_MESSAGE_MAX_BYTES_V2 = 65535 // Maximum size of a TEXT column in MySQL - POST_MESSAGE_MAX_RUNES_V2 = POST_MESSAGE_MAX_BYTES_V2 / 4 // Assume a worst-case representation - POST_PROPS_MAX_RUNES = 8000 - POST_PROPS_MAX_USER_RUNES = POST_PROPS_MAX_RUNES - 400 // Leave some room for system / pre-save modifications - POST_CUSTOM_TYPE_PREFIX = "custom_" - POST_ME = "me" - PROPS_ADD_CHANNEL_MEMBER = "add_channel_member" - - POST_PROPS_ADDED_USER_ID = "addedUserId" - POST_PROPS_DELETE_BY = "deleteBy" - POST_PROPS_OVERRIDE_ICON_URL = "override_icon_url" - POST_PROPS_OVERRIDE_ICON_EMOJI = "override_icon_emoji" - - POST_PROPS_MENTION_HIGHLIGHT_DISABLED = "mentionHighlightDisabled" - POST_PROPS_GROUP_HIGHLIGHT_DISABLED = "disable_group_highlight" - POST_SYSTEM_WARN_METRIC_STATUS = "warn_metric_status" + PostSystemMessagePrefix = "system_" + PostTypeDefault = "" + PostTypeSlackAttachment = "slack_attachment" + PostTypeSystemGeneric = "system_generic" + PostTypeJoinLeave = "system_join_leave" // Deprecated, use PostJoinChannel or PostLeaveChannel instead + PostTypeJoinChannel = "system_join_channel" + PostTypeGuestJoinChannel = "system_guest_join_channel" + PostTypeLeaveChannel = "system_leave_channel" + PostTypeJoinTeam = "system_join_team" + PostTypeLeaveTeam = "system_leave_team" + PostTypeAutoResponder = "system_auto_responder" + PostTypeAddRemove = "system_add_remove" // Deprecated, use PostAddToChannel or PostRemoveFromChannel instead + PostTypeAddToChannel = "system_add_to_channel" + PostTypeAddGuestToChannel = "system_add_guest_to_chan" + PostTypeRemoveFromChannel = "system_remove_from_channel" + PostTypeMoveChannel = "system_move_channel" + PostTypeAddToTeam = "system_add_to_team" + PostTypeRemoveFromTeam = "system_remove_from_team" + PostTypeHeaderChange = "system_header_change" + PostTypeDisplaynameChange = "system_displayname_change" + PostTypeConvertChannel = "system_convert_channel" + PostTypePurposeChange = "system_purpose_change" + PostTypeChannelDeleted = "system_channel_deleted" + PostTypeChannelRestored = "system_channel_restored" + PostTypeEphemeral = "system_ephemeral" + PostTypeChangeChannelPrivacy = "system_change_chan_privacy" + PostTypeAddBotTeamsChannels = "add_bot_teams_channels" + PostTypeSystemWarnMetricStatus = "warn_metric_status" + PostTypeMe = "me" + PostCustomTypePrefix = "custom_" + + PostFileidsMaxRunes = 300 + PostFilenamesMaxRunes = 4000 + PostHashtagsMaxRunes = 
1000 + PostMessageMaxRunesV1 = 4000 + PostMessageMaxBytesV2 = 65535 // Maximum size of a TEXT column in MySQL + PostMessageMaxRunesV2 = PostMessageMaxBytesV2 / 4 // Assume a worst-case representation + PostPropsMaxRunes = 800000 + PostPropsMaxUserRunes = PostPropsMaxRunes - 40000 // Leave some room for system / pre-save modifications + + PropsAddChannelMember = "add_channel_member" + + PostPropsAddedUserId = "addedUserId" + PostPropsDeleteBy = "deleteBy" + PostPropsOverrideIconURL = "override_icon_url" + PostPropsOverrideIconEmoji = "override_icon_emoji" + + PostPropsMentionHighlightDisabled = "mentionHighlightDisabled" + PostPropsGroupHighlightDisabled = "disable_group_highlight" + + PostPropsPreviewedPost = "previewed_post" ) -var AT_MENTION_PATTEN = regexp.MustCompile(`\B@`) - type Post struct { Id string `json:"id"` CreateAt int64 `json:"create_at"` @@ -79,27 +81,30 @@ type Post struct { UserId string `json:"user_id"` ChannelId string `json:"channel_id"` RootId string `json:"root_id"` - ParentId string `json:"parent_id"` OriginalId string `json:"original_id"` Message string `json:"message"` // MessageSource will contain the message as submitted by the user if Message has been modified // by Mattermost for presentation (e.g if an image proxy is being used). It should be used to // populate edit boxes if present. - MessageSource string `json:"message_source,omitempty" db:"-"` + MessageSource string `json:"message_source,omitempty"` Type string `json:"type"` propsMu sync.RWMutex `db:"-"` // Unexported mutex used to guard Post.Props. Props StringInterface `json:"props"` // Deprecated: use GetProps() Hashtags string `json:"hashtags"` - Filenames StringArray `json:"filenames,omitempty"` // Deprecated, do not use this field any more + Filenames StringArray `json:"-"` // Deprecated, do not use this field any more FileIds StringArray `json:"file_ids,omitempty"` - PendingPostId string `json:"pending_post_id" db:"-"` + PendingPostId string `json:"pending_post_id"` HasReactions bool `json:"has_reactions,omitempty"` + RemoteId *string `json:"remote_id,omitempty"` // Transient data populated before sending a post to the client - ReplyCount int64 `json:"reply_count" db:"-"` - Metadata *PostMetadata `json:"metadata,omitempty" db:"-"` + ReplyCount int64 `json:"reply_count"` + LastReplyAt int64 `json:"last_reply_at"` + Participants []*User `json:"participants"` + IsFollowing *bool `json:"is_following,omitempty"` // for root posts in collapsed thread mode indicates if the current user is following this thread + Metadata *PostMetadata `json:"metadata,omitempty"` } type PostEphemeral struct { @@ -163,6 +168,12 @@ type PostForIndexing struct { ParentCreateAt *int64 `json:"parent_create_at"` } +type FileForIndexing struct { + FileInfo + ChannelId string `json:"channel_id"` + Content string `json:"content"` +} + // ShallowCopy is an utility function to shallow copy a Post to the given // destination without touching the internal RWMutex. 
func (o *Post) ShallowCopy(dst *Post) error { @@ -182,7 +193,6 @@ func (o *Post) ShallowCopy(dst *Post) error { dst.UserId = o.UserId dst.ChannelId = o.ChannelId dst.RootId = o.RootId - dst.ParentId = o.ParentId dst.OriginalId = o.OriginalId dst.Message = o.Message dst.MessageSource = o.MessageSource @@ -194,7 +204,13 @@ func (o *Post) ShallowCopy(dst *Post) error { dst.PendingPostId = o.PendingPostId dst.HasReactions = o.HasReactions dst.ReplyCount = o.ReplyCount + dst.Participants = o.Participants + dst.LastReplyAt = o.LastReplyAt dst.Metadata = o.Metadata + if o.IsFollowing != nil { + dst.IsFollowing = NewBool(*o.IsFollowing) + } + dst.RemoteId = o.RemoteId return nil } @@ -205,36 +221,51 @@ func (o *Post) Clone() *Post { return copy } -func (o *Post) ToJson() string { +func (o *Post) ToJSON() (string, error) { copy := o.Clone() copy.StripActionIntegrations() - b, _ := json.Marshal(copy) - return string(b) + b, err := json.Marshal(copy) + return string(b), err } -func (o *Post) ToUnsanitizedJson() string { - b, _ := json.Marshal(o) - return string(b) +func (o *Post) EncodeJSON(w io.Writer) error { + o.StripActionIntegrations() + return json.NewEncoder(w).Encode(o) } type GetPostsSinceOptions struct { - ChannelId string - Time int64 - SkipFetchThreads bool + UserId string + ChannelId string + Time int64 + SkipFetchThreads bool + CollapsedThreads bool + CollapsedThreadsExtended bool + SortAscending bool } -type GetPostsOptions struct { - ChannelId string - PostId string - Page int - PerPage int - SkipFetchThreads bool +type GetPostsSinceForSyncCursor struct { + LastPostUpdateAt int64 + LastPostId string } -func PostFromJson(data io.Reader) *Post { - var o *Post - json.NewDecoder(data).Decode(&o) - return o +type GetPostsSinceForSyncOptions struct { + ChannelId string + ExcludeRemoteId string + IncludeDeleted bool +} + +type GetPostsOptions struct { + UserId string + ChannelId string + PostId string + Page int + PerPage int + SkipFetchThreads bool + CollapsedThreads bool + CollapsedThreadsExtended bool + FromPost string // PostId after which to send the items + FromCreateAt int64 // CreateAt after which to send the items + Direction string // Only accepts up|down. Indicates the order in which to send the items. 
} func (o *Post) Etag() string { @@ -262,19 +293,11 @@ func (o *Post) IsValid(maxPostSize int) *AppError { return NewAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } - if !(IsValidId(o.RootId) || len(o.RootId) == 0) { + if !(IsValidId(o.RootId) || o.RootId == "") { return NewAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "", http.StatusBadRequest) } - if !(IsValidId(o.ParentId) || len(o.ParentId) == 0) { - return NewAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest) - } - - if len(o.ParentId) == 26 && len(o.RootId) == 0 { - return NewAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "", http.StatusBadRequest) - } - - if !(len(o.OriginalId) == 26 || len(o.OriginalId) == 0) { + if !(len(o.OriginalId) == 26 || o.OriginalId == "") { return NewAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "", http.StatusBadRequest) } @@ -282,54 +305,54 @@ func (o *Post) IsValid(maxPostSize int) *AppError { return NewAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if utf8.RuneCountInString(o.Hashtags) > POST_HASHTAGS_MAX_RUNES { + if utf8.RuneCountInString(o.Hashtags) > PostHashtagsMaxRunes { return NewAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id, http.StatusBadRequest) } switch o.Type { case - POST_DEFAULT, - POST_SYSTEM_GENERIC, - POST_JOIN_LEAVE, - POST_AUTO_RESPONDER, - POST_ADD_REMOVE, - POST_JOIN_CHANNEL, - POST_GUEST_JOIN_CHANNEL, - POST_LEAVE_CHANNEL, - POST_JOIN_TEAM, - POST_LEAVE_TEAM, - POST_ADD_TO_CHANNEL, - POST_ADD_GUEST_TO_CHANNEL, - POST_REMOVE_FROM_CHANNEL, - POST_MOVE_CHANNEL, - POST_ADD_TO_TEAM, - POST_REMOVE_FROM_TEAM, - POST_SLACK_ATTACHMENT, - POST_HEADER_CHANGE, - POST_PURPOSE_CHANGE, - POST_DISPLAYNAME_CHANGE, - POST_CONVERT_CHANNEL, - POST_CHANNEL_DELETED, - POST_CHANNEL_RESTORED, - POST_CHANGE_CHANNEL_PRIVACY, - POST_ME, - POST_ADD_BOT_TEAMS_CHANNELS, - POST_SYSTEM_WARN_METRIC_STATUS: + PostTypeDefault, + PostTypeSystemGeneric, + PostTypeJoinLeave, + PostTypeAutoResponder, + PostTypeAddRemove, + PostTypeJoinChannel, + PostTypeGuestJoinChannel, + PostTypeLeaveChannel, + PostTypeJoinTeam, + PostTypeLeaveTeam, + PostTypeAddToChannel, + PostTypeAddGuestToChannel, + PostTypeRemoveFromChannel, + PostTypeMoveChannel, + PostTypeAddToTeam, + PostTypeRemoveFromTeam, + PostTypeSlackAttachment, + PostTypeHeaderChange, + PostTypePurposeChange, + PostTypeDisplaynameChange, + PostTypeConvertChannel, + PostTypeChannelDeleted, + PostTypeChannelRestored, + PostTypeChangeChannelPrivacy, + PostTypeAddBotTeamsChannels, + PostTypeSystemWarnMetricStatus, + PostTypeMe: default: - if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) { + if !strings.HasPrefix(o.Type, PostCustomTypePrefix) { return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest) } } - if utf8.RuneCountInString(ArrayToJson(o.Filenames)) > POST_FILENAMES_MAX_RUNES { + if utf8.RuneCountInString(ArrayToJSON(o.Filenames)) > PostFilenamesMaxRunes { return NewAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if utf8.RuneCountInString(ArrayToJson(o.FileIds)) > POST_FILEIDS_MAX_RUNES { + if utf8.RuneCountInString(ArrayToJSON(o.FileIds)) > PostFileidsMaxRunes { return NewAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id, 
http.StatusBadRequest) } - if utf8.RuneCountInString(StringInterfaceToJson(o.GetProps())) > POST_PROPS_MAX_RUNES { + if utf8.RuneCountInString(StringInterfaceToJSON(o.GetProps())) > PostPropsMaxRunes { return NewAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id, http.StatusBadRequest) } @@ -337,8 +360,11 @@ func (o *Post) IsValid(maxPostSize int) *AppError { } func (o *Post) SanitizeProps() { + if o == nil { + return + } membersToSanitize := []string{ - PROPS_ADD_CHANNEL_MEMBER, + PropsAddChannelMember, } for _, member := range membersToSanitize { @@ -346,6 +372,9 @@ func (o *Post) SanitizeProps() { o.DelProp(member) } } + for _, p := range o.Participants { + p.Sanitize(map[string]bool{}) + } } func (o *Post) PreSave() { @@ -429,20 +458,33 @@ func (o *Post) GetProp(key string) interface{} { } func (o *Post) IsSystemMessage() bool { - return len(o.Type) >= len(POST_SYSTEM_MESSAGE_PREFIX) && o.Type[:len(POST_SYSTEM_MESSAGE_PREFIX)] == POST_SYSTEM_MESSAGE_PREFIX + return len(o.Type) >= len(PostSystemMessagePrefix) && o.Type[:len(PostSystemMessagePrefix)] == PostSystemMessagePrefix +} + +// IsRemote returns true if the post originated on a remote cluster. +func (o *Post) IsRemote() bool { + return o.RemoteId != nil && *o.RemoteId != "" +} + +// GetRemoteID safely returns the remoteID or empty string if not remote. +func (o *Post) GetRemoteID() string { + if o.RemoteId != nil { + return *o.RemoteId + } + return "" } func (o *Post) IsJoinLeaveMessage() bool { - return o.Type == POST_JOIN_LEAVE || - o.Type == POST_ADD_REMOVE || - o.Type == POST_JOIN_CHANNEL || - o.Type == POST_LEAVE_CHANNEL || - o.Type == POST_JOIN_TEAM || - o.Type == POST_LEAVE_TEAM || - o.Type == POST_ADD_TO_CHANNEL || - o.Type == POST_REMOVE_FROM_CHANNEL || - o.Type == POST_ADD_TO_TEAM || - o.Type == POST_REMOVE_FROM_TEAM + return o.Type == PostTypeJoinLeave || + o.Type == PostTypeAddRemove || + o.Type == PostTypeJoinChannel || + o.Type == PostTypeLeaveChannel || + o.Type == PostTypeJoinTeam || + o.Type == PostTypeLeaveTeam || + o.Type == PostTypeAddToChannel || + o.Type == PostTypeRemoveFromChannel || + o.Type == PostTypeAddToTeam || + o.Type == PostTypeRemoveFromTeam } func (o *Post) Patch(patch *PostPatch) { @@ -468,46 +510,6 @@ func (o *Post) Patch(patch *PostPatch) { } } -func (o *PostPatch) ToJson() string { - b, err := json.Marshal(o) - if err != nil { - return "" - } - - return string(b) -} - -func PostPatchFromJson(data io.Reader) *PostPatch { - decoder := json.NewDecoder(data) - var post PostPatch - err := decoder.Decode(&post) - if err != nil { - return nil - } - - return &post -} - -func (o *SearchParameter) SearchParameterToJson() string { - b, err := json.Marshal(o) - if err != nil { - return "" - } - - return string(b) -} - -func SearchParameterFromJson(data io.Reader) *SearchParameter { - decoder := json.NewDecoder(data) - var searchParam SearchParameter - err := decoder.Decode(&searchParam) - if err != nil { - return nil - } - - return &searchParam -} - func (o *Post) ChannelMentions() []string { return ChannelMentions(o.Message) } @@ -516,7 +518,7 @@ func (o *Post) ChannelMentions() []string { func (o *Post) DisableMentionHighlights() string { mention, hasMentions := findAtChannelMention(o.Message) if hasMentions { - o.AddProp(POST_PROPS_MENTION_HIGHLIGHT_DISABLED, true) + o.AddProp(PostPropsMentionHighlightDisabled, true) } return mention } @@ -530,7 +532,7 @@ func (o *PostPatch) DisableMentionHighlights() { if o.Props == nil { o.Props = &StringInterface{} } - 
(*o.Props)[POST_PROPS_MENTION_HIGHLIGHT_DISABLED] = true + (*o.Props)[PostPropsMentionHighlightDisabled] = true } } @@ -553,6 +555,25 @@ func (o *Post) Attachments() []*SlackAttachment { if enc, err := json.Marshal(attachment); err == nil { var decoded SlackAttachment if json.Unmarshal(enc, &decoded) == nil { + // Ignoring nil actions + i := 0 + for _, action := range decoded.Actions { + if action != nil { + decoded.Actions[i] = action + i++ + } + } + decoded.Actions = decoded.Actions[:i] + + // Ignoring nil fields + i = 0 + for _, field := range decoded.Fields { + if field != nil { + decoded.Fields[i] = field + i++ + } + } + decoded.Fields = decoded.Fields[:i] ret = append(ret, &decoded) } } @@ -597,11 +618,6 @@ func (o *Post) WithRewrittenImageURLs(f func(string) string) *Post { return copy } -func (o *PostEphemeral) ToUnsanitizedJson() string { - b, _ := json.Marshal(o) - return string(b) -} - // RewriteImageURLs takes a message and returns a copy that has all of the image URLs replaced // according to the function f. For each image URL, f will be invoked, and the resulting markdown // will contain the URL returned by that invocation instead. @@ -666,3 +682,47 @@ func RewriteImageURLs(message string, f func(string) string) string { return string(result) } + +func (o *Post) IsFromOAuthBot() bool { + props := o.GetProps() + return props["from_webhook"] == "true" && props["override_username"] != "" +} + +func (o *Post) ToNilIfInvalid() *Post { + if o.Id == "" { + return nil + } + return o +} + +func (o *Post) RemovePreviewPost() { + if o.Metadata == nil || o.Metadata.Embeds == nil { + return + } + n := 0 + for _, embed := range o.Metadata.Embeds { + if embed.Type != PostEmbedPermalink { + o.Metadata.Embeds[n] = embed + n++ + } + } + o.Metadata.Embeds = o.Metadata.Embeds[:n] +} + +func (o *Post) GetPreviewPost() *PreviewPost { + for _, embed := range o.Metadata.Embeds { + if embed.Type == PostEmbedPermalink { + if previewPost, ok := embed.Data.(*PreviewPost); ok { + return previewPost + } + } + } + return nil +} + +func (o *Post) GetPreviewedPostProp() string { + if val, ok := o.GetProp(PostPropsPreviewedPost).(string); ok { + return val + } + return "" +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go similarity index 58% rename from vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go index 5c6efec1..b72ae6e1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go @@ -4,10 +4,12 @@ package model const ( - POST_EMBED_IMAGE PostEmbedType = "image" - POST_EMBED_MESSAGE_ATTACHMENT PostEmbedType = "message_attachment" - POST_EMBED_OPENGRAPH PostEmbedType = "opengraph" - POST_EMBED_LINK PostEmbedType = "link" + PostEmbedImage PostEmbedType = "image" + PostEmbedMessageAttachment PostEmbedType = "message_attachment" + PostEmbedOpengraph PostEmbedType = "opengraph" + PostEmbedLink PostEmbedType = "link" + PostEmbedPermalink PostEmbedType = "permalink" + PostEmbedBoards PostEmbedType = "boards" ) type PostEmbedType string diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post_list.go similarity index 78% rename from vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go rename to 
vendor/github.com/mattermost/mattermost-server/v6/model/post_list.go index d00b68b5..614bbf8f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post_list.go @@ -14,6 +14,8 @@ type PostList struct { Posts map[string]*Post `json:"posts"` NextPostId string `json:"next_post_id"` PrevPostId string `json:"prev_post_id"` + // HasNext indicates whether there are more items to be fetched or not. + HasNext bool `json:"has_next"` } func NewPostList() *PostList { @@ -25,8 +27,31 @@ func NewPostList() *PostList { } } +func (o *PostList) Clone() *PostList { + orderCopy := make([]string, len(o.Order)) + postsCopy := make(map[string]*Post) + for i, v := range o.Order { + orderCopy[i] = v + } + for k, v := range o.Posts { + postsCopy[k] = v.Clone() + } + return &PostList{ + Order: orderCopy, + Posts: postsCopy, + NextPostId: o.NextPostId, + PrevPostId: o.PrevPostId, + HasNext: o.HasNext, + } +} + func (o *PostList) ToSlice() []*Post { var posts []*Post + + if l := len(o.Posts); l > 0 { + posts = make([]*Post, 0, l) + } + for _, id := range o.Order { posts = append(posts, o.Posts[id]) } @@ -52,15 +77,16 @@ func (o *PostList) StripActionIntegrations() { } } -func (o *PostList) ToJson() string { +func (o *PostList) ToJSON() (string, error) { copy := *o copy.StripActionIntegrations() b, err := json.Marshal(&copy) - if err != nil { - return "" - } else { - return string(b) - } + return string(b), err +} + +func (o *PostList) EncodeJSON(w io.Writer) error { + o.StripActionIntegrations() + return json.NewEncoder(w).Encode(o) } func (o *PostList) MakeNonNil() { @@ -158,9 +184,3 @@ func (o *PostList) IsChannelId(channelId string) bool { return true } - -func PostListFromJson(data io.Reader) *PostList { - var o *PostList - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post_metadata.go similarity index 68% rename from vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/post_metadata.go index 7b0687ca..6ccc1ebf 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post_metadata.go @@ -3,10 +3,6 @@ package model -import ( - "encoding/json" -) - type PostMetadata struct { // Embeds holds information required to render content embedded in the post. This includes the OpenGraph metadata // for links in the post. @@ -18,7 +14,7 @@ type PostMetadata struct { // Files holds information about the file attachments on the post. Files []*FileInfo `json:"files,omitempty"` - // Images holds the dimensions of all external images in the post as a map of the image URL to its diemsnions. + // Images holds the dimensions of all external images in the post as a map of the image URL to its dimensions. // This includes image embeds (when the message contains a plaintext link to an image), Markdown images, images // contained in the OpenGraph metadata, and images contained in message attachments. It does not contain // the dimensions of any file attachments as those are stored in FileInfos.
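With the FromJson/ToJson helpers deleted in the hunks above, a minimal sketch of the v6 serialization API may help; the sample values and the use of os.Stdout are illustrative, and only ToJSON, EncodeJSON, and NewPostList come from this diff.

package main

import (
	"fmt"
	"os"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	post := &model.Post{Message: "hello"}

	// v6's ToJSON surfaces the marshal error that v5's ToJson swallowed.
	s, err := post.ToJSON()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(s)

	// EncodeJSON streams a PostList straight to any io.Writer.
	list := model.NewPostList()
	if err := list.EncodeJSON(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Decoding now goes through plain encoding/json (json.Unmarshal or json.NewDecoder), since the FromJson constructors were removed rather than renamed.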
@@ -39,7 +35,30 @@ type PostImage struct { FrameCount int `json:"frame_count"` } -func (o *PostImage) ToJson() string { - b, _ := json.Marshal(o) - return string(b) +// Copy does a deep copy +func (p *PostMetadata) Copy() *PostMetadata { + embedsCopy := make([]*PostEmbed, len(p.Embeds)) + copy(embedsCopy, p.Embeds) + + emojisCopy := make([]*Emoji, len(p.Emojis)) + copy(emojisCopy, p.Emojis) + + filesCopy := make([]*FileInfo, len(p.Files)) + copy(filesCopy, p.Files) + + imagesCopy := map[string]*PostImage{} + for k, v := range p.Images { + imagesCopy[k] = v + } + + reactionsCopy := make([]*Reaction, len(p.Reactions)) + copy(reactionsCopy, p.Reactions) + + return &PostMetadata{ + Embeds: embedsCopy, + Emojis: emojisCopy, + Files: filesCopy, + Images: imagesCopy, + Reactions: reactionsCopy, + } } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post_search_results.go similarity index 68% rename from vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/post_search_results.go index 74ef4b52..a3afc723 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post_search_results.go @@ -22,19 +22,14 @@ func MakePostSearchResults(posts *PostList, matches PostSearchMatches) *PostSear } } -func (o *PostSearchResults) ToJson() string { +func (o *PostSearchResults) ToJSON() (string, error) { copy := *o copy.PostList.StripActionIntegrations() b, err := json.Marshal(&copy) - if err != nil { - return "" - } else { - return string(b) - } + return string(b), err } -func PostSearchResultsFromJson(data io.Reader) *PostSearchResults { - var o *PostSearchResults - json.NewDecoder(data).Decode(&o) - return o +func (o *PostSearchResults) EncodeJSON(w io.Writer) error { + o.PostList.StripActionIntegrations() + return json.NewEncoder(w).Encode(o) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go b/vendor/github.com/mattermost/mattermost-server/v6/model/preference.go similarity index 51% rename from vendor/github.com/mattermost/mattermost-server/v5/model/preference.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/preference.go index e752bb54..41a58235 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/preference.go @@ -5,7 +5,6 @@ package model import ( "encoding/json" - "io" "net/http" "regexp" "strings" ) const ( - PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" - PREFERENCE_CATEGORY_GROUP_CHANNEL_SHOW = "group_channel_show" - PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" - PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" - PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post" - PREFERENCE_CATEGORY_FAVORITE_CHANNEL = "favorite_channel" - PREFERENCE_CATEGORY_SIDEBAR_SETTINGS = "sidebar_settings" - - PREFERENCE_CATEGORY_DISPLAY_SETTINGS = "display_settings" - PREFERENCE_NAME_CHANNEL_DISPLAY_MODE = "channel_display_mode" - PREFERENCE_NAME_COLLAPSE_SETTING = "collapse_previews" - PREFERENCE_NAME_MESSAGE_DISPLAY = "message_display" - PREFERENCE_NAME_NAME_FORMAT = "name_format" - PREFERENCE_NAME_USE_MILITARY_TIME = "use_military_time" - - PREFERENCE_CATEGORY_THEME = "theme" + PreferenceCategoryDirectChannelShow = "direct_channel_show" +
PreferenceCategoryGroupChannelShow = "group_channel_show" + PreferenceCategoryTutorialSteps = "tutorial_step" + PreferenceCategoryAdvancedSettings = "advanced_settings" + PreferenceCategoryFlaggedPost = "flagged_post" + PreferenceCategoryFavoriteChannel = "favorite_channel" + PreferenceCategorySidebarSettings = "sidebar_settings" + + PreferenceCategoryDisplaySettings = "display_settings" + PreferenceNameCollapsedThreadsEnabled = "collapsed_reply_threads" + PreferenceNameChannelDisplayMode = "channel_display_mode" + PreferenceNameCollapseSetting = "collapse_previews" + PreferenceNameMessageDisplay = "message_display" + PreferenceNameNameFormat = "name_format" + PreferenceNameUseMilitaryTime = "use_military_time" + PreferenceRecommendedNextSteps = "recommended_next_steps" + + PreferenceCategoryTheme = "theme" // the name for theme props is the team id - PREFERENCE_CATEGORY_AUTHORIZED_OAUTH_APP = "oauth_app" + PreferenceCategoryAuthorizedOAuthApp = "oauth_app" // the name for oauth_app is the client_id and value is the current scope - PREFERENCE_CATEGORY_LAST = "last" - PREFERENCE_NAME_LAST_CHANNEL = "channel" - PREFERENCE_NAME_LAST_TEAM = "team" + PreferenceCategoryLast = "last" + PreferenceNameLastChannel = "channel" + PreferenceNameLastTeam = "team" - PREFERENCE_CATEGORY_NOTIFICATIONS = "notifications" - PREFERENCE_NAME_EMAIL_INTERVAL = "email_interval" + PreferenceCategoryCustomStatus = "custom_status" + PreferenceNameRecentCustomStatuses = "recent_custom_statuses" + PreferenceNameCustomStatusTutorialState = "custom_status_tutorial_state" - PREFERENCE_EMAIL_INTERVAL_NO_BATCHING_SECONDS = "30" // the "immediate" setting is actually 30s - PREFERENCE_EMAIL_INTERVAL_BATCHING_SECONDS = "900" // fifteen minutes is 900 seconds - PREFERENCE_EMAIL_INTERVAL_IMMEDIATELY = "immediately" - PREFERENCE_EMAIL_INTERVAL_FIFTEEN = "fifteen" - PREFERENCE_EMAIL_INTERVAL_FIFTEEN_AS_SECONDS = "900" - PREFERENCE_EMAIL_INTERVAL_HOUR = "hour" - PREFERENCE_EMAIL_INTERVAL_HOUR_AS_SECONDS = "3600" + PreferenceCustomStatusModalViewed = "custom_status_modal_viewed" + + PreferenceCategoryNotifications = "notifications" + PreferenceNameEmailInterval = "email_interval" + + PreferenceEmailIntervalNoBatchingSeconds = "30" // the "immediate" setting is actually 30s + PreferenceEmailIntervalBatchingSeconds = "900" // fifteen minutes is 900 seconds + PreferenceEmailIntervalImmediately = "immediately" + PreferenceEmailIntervalFifteen = "fifteen" + PreferenceEmailIntervalFifteenAsSeconds = "900" + PreferenceEmailIntervalHour = "hour" + PreferenceEmailIntervalHourAsSeconds = "3600" ) type Preference struct { @@ -57,23 +64,14 @@ type Preference struct { Value string `json:"value"` } -func (o *Preference) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func PreferenceFromJson(data io.Reader) *Preference { - var o *Preference - json.NewDecoder(data).Decode(&o) - return o -} +type Preferences []Preference func (o *Preference) IsValid() *AppError { if !IsValidId(o.UserId) { return NewAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) } - if len(o.Category) == 0 || len(o.Category) > 32 { + if o.Category == "" || len(o.Category) > 32 { return NewAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category, http.StatusBadRequest) } @@ -85,7 +83,7 @@ func (o *Preference) IsValid() *AppError { return NewAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value, 
http.StatusBadRequest) } - if o.Category == PREFERENCE_CATEGORY_THEME { + if o.Category == PreferenceCategoryTheme { var unused map[string]string if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&unused); err != nil { return NewAppError("Preference.IsValid", "model.preference.is_valid.theme.app_error", nil, "value="+o.Value, http.StatusBadRequest) @@ -96,7 +94,7 @@ func (o *Preference) IsValid() *AppError { } func (o *Preference) PreUpdate() { - if o.Category == PREFERENCE_CATEGORY_THEME { + if o.Category == PreferenceCategoryTheme { // decode the value of theme (a map of strings to string) and eliminate any invalid values var props map[string]string if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&props); err != nil { diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/product_notices.go b/vendor/github.com/mattermost/mattermost-server/v6/model/product_notices.go new file mode 100644 index 00000000..94343cb4 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/product_notices.go @@ -0,0 +1,220 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" +) + +type ProductNotices []ProductNotice + +func (r *ProductNotices) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +func UnmarshalProductNotices(data []byte) (ProductNotices, error) { + var r ProductNotices + err := json.Unmarshal(data, &r) + return r, err +} + +// List of product notices. Order is important and is used to resolve priorities. +// Each notice will only be show if conditions are met. +type ProductNotice struct { + Conditions Conditions `json:"conditions"` + ID string `json:"id"` // Unique identifier for this notice. Can be a running number. Used for storing 'viewed'; state on the server. + LocalizedMessages map[string]NoticeMessageInternal `json:"localizedMessages"` // Notice message data, organized by locale.; Example:; "localizedMessages": {; "en": { "title": "English", description: "English description"},; "frFR": { "title": "Frances", description: "French description"}; } + Repeatable *bool `json:"repeatable,omitempty"` // Configurable flag if the notice should reappear after it’s seen and dismissed +} + +func (n *ProductNotice) SysAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudienceSysadmin +} + +func (n *ProductNotice) TeamAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudienceTeamAdmin +} + +type Conditions struct { + Audience *NoticeAudience `json:"audience,omitempty"` + ClientType *NoticeClientType `json:"clientType,omitempty"` // Only show the notice on specific clients. 
Defaults to 'all'
+	DesktopVersion []string `json:"desktopVersion,omitempty"` // What desktop client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]
+	DisplayDate *string `json:"displayDate,omitempty"` // When to display the notice.; Examples:; "2020-03-01T00:00:00Z" - show on specified date; ">= 2020-03-01T00:00:00Z" - show after specified date; "< 2020-03-01T00:00:00Z" - show before the specified date; "> 2020-03-01T00:00:00Z <= 2020-04-01T00:00:00Z" - show only between the specified dates
+	InstanceType *NoticeInstanceType `json:"instanceType,omitempty"`
+	MobileVersion []string `json:"mobileVersion,omitempty"` // What mobile client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]
+	ServerVersion []string `json:"serverVersion,omitempty"` // What server versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]
[... remainder of product_notices.go omitted ...]
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go b/vendor/github.com/mattermost/mattermost-server/v6/model/push_notification.go
-func (me *PushNotification) SetDeviceIdAndPlatform(deviceId string) {
-	index := strings.Index(deviceId, ":")
+func (pn *PushNotification) SetDeviceIdAndPlatform(deviceId string) {
+	index := strings.Index(deviceId, ":")
 	if index > -1 {
-		me.Platform = deviceId[:index]
-		me.DeviceId = deviceId[index+1:]
-	}
-}
-
-func PushNotificationFromJson(data io.Reader) (*PushNotification, error) {
-	if data == nil {
-		return nil, errors.New("push notification data can't be nil")
-	}
-	var me *PushNotification
-	if err := json.NewDecoder(data).Decode(&me); err != nil {
-		return nil, err
+		pn.Platform = deviceId[:index]
+		pn.DeviceId = deviceId[index+1:]
 	}
-	return me, nil
-}
-
-func PushNotificationAckFromJson(data io.Reader) (*PushNotificationAck, error) {
-	if data == nil {
-		return nil, errors.New("push notification data can't be nil")
-	}
-	var ack *PushNotificationAck
-	if err := json.NewDecoder(data).Decode(&ack); err != nil {
-		return nil, err
-	}
-	return ack, nil
-}
-
-func (ack *PushNotificationAck) ToJson() string {
-	b, _ := json.Marshal(ack)
-	return string(b)
 }
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/push_response.go b/vendor/github.com/mattermost/mattermost-server/v6/model/push_response.go new file mode 100644 index 00000000..a88b4339 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/push_response.go @@ -0,0 +1,33 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +const ( + PushStatus = "status" + PushStatusOk = "OK" + PushStatusFail = "FAIL" + PushStatusRemove = "REMOVE" + PushStatusErrorMsg = "error" +) + +type PushResponse map[string]string + +func NewOkPushResponse() PushResponse { + m := make(map[string]string) + m[PushStatus] = PushStatusOk + return m +} + +func NewRemovePushResponse() PushResponse { + m := make(map[string]string) + m[PushStatus] = PushStatusRemove + return m +} + +func NewErrorPushResponse(message string) PushResponse { + m := make(map[string]string) + m[PushStatus] = PushStatusFail + m[PushStatusErrorMsg] = message + return m +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/reaction.go b/vendor/github.com/mattermost/mattermost-server/v6/model/reaction.go new file mode 100644 index 00000000..335cb904 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/reaction.go @@ -0,0 +1,65 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information.
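A hedged sketch of consuming a push proxy reply with the new PushResponse helpers above; the switch and the error message are invented for illustration, only the constants and constructors come from push_response.go.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	resp := model.NewErrorPushResponse("device token expired") // illustrative message

	switch resp[model.PushStatus] {
	case model.PushStatusOk:
		fmt.Println("delivered")
	case model.PushStatusRemove:
		fmt.Println("stale device id, remove it")
	case model.PushStatusFail:
		fmt.Println("push failed:", resp[model.PushStatusErrorMsg])
	}
}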
+ +package model + +import ( + "net/http" + "regexp" +) + +type Reaction struct { + UserId string `json:"user_id"` + PostId string `json:"post_id"` + EmojiName string `json:"emoji_name"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + RemoteId *string `json:"remote_id"` +} + +func (o *Reaction) IsValid() *AppError { + if !IsValidId(o.UserId) { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) + } + + if !IsValidId(o.PostId) { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest) + } + + validName := regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`) + + if o.EmojiName == "" || len(o.EmojiName) > EmojiNameMaxLength || !validName.MatchString(o.EmojiName) { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName, http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *Reaction) PreSave() { + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } + o.UpdateAt = GetMillis() + o.DeleteAt = 0 + + if o.RemoteId == nil { + o.RemoteId = NewString("") + } +} + +func (o *Reaction) PreUpdate() { + o.UpdateAt = GetMillis() + + if o.RemoteId == nil { + o.RemoteId = NewString("") + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/remote_cluster.go b/vendor/github.com/mattermost/mattermost-server/v6/model/remote_cluster.go new file mode 100644 index 00000000..44d11f16 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/remote_cluster.go @@ -0,0 +1,302 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
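reaction.go above is new in v6; a small usage sketch (ids and emoji name invented) showing that PreSave backfills CreateAt/UpdateAt and normalizes a nil RemoteId before IsValid runs:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	r := &model.Reaction{
		UserId:    model.NewId(),
		PostId:    model.NewId(),
		EmojiName: "thumbsup", // must match ^[a-zA-Z0-9\-\+_]+$
	}
	r.PreSave() // sets CreateAt/UpdateAt, resets DeleteAt, fills a nil RemoteId with ""
	if appErr := r.IsValid(); appErr != nil {
		fmt.Println("invalid reaction:", appErr.Error())
		return
	}
	fmt.Println("reaction ok, created at", r.CreateAt)
}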
+ +package model + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/json" + "errors" + "io" + "net/http" + "regexp" + "strings" + + "golang.org/x/crypto/scrypt" +) + +const ( + RemoteOfflineAfterMillis = 1000 * 60 * 5 // 5 minutes + RemoteNameMinLength = 1 + RemoteNameMaxLength = 64 +) + +var ( + validRemoteNameChars = regexp.MustCompile(`^[a-zA-Z0-9\.\-\_]+$`) +) + +type RemoteCluster struct { + RemoteId string `json:"remote_id"` + RemoteTeamId string `json:"remote_team_id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + SiteURL string `json:"site_url"` + CreateAt int64 `json:"create_at"` + LastPingAt int64 `json:"last_ping_at"` + Token string `json:"token"` + RemoteToken string `json:"remote_token"` + Topics string `json:"topics"` + CreatorId string `json:"creator_id"` +} + +func (rc *RemoteCluster) PreSave() { + if rc.RemoteId == "" { + rc.RemoteId = NewId() + } + + if rc.DisplayName == "" { + rc.DisplayName = rc.Name + } + + rc.Name = SanitizeUnicode(rc.Name) + rc.DisplayName = SanitizeUnicode(rc.DisplayName) + rc.Name = NormalizeRemoteName(rc.Name) + + if rc.Token == "" { + rc.Token = NewId() + } + + if rc.CreateAt == 0 { + rc.CreateAt = GetMillis() + } + rc.fixTopics() +} + +func (rc *RemoteCluster) IsValid() *AppError { + if !IsValidId(rc.RemoteId) { + return NewAppError("RemoteCluster.IsValid", "model.cluster.is_valid.id.app_error", nil, "id="+rc.RemoteId, http.StatusBadRequest) + } + + if !IsValidRemoteName(rc.Name) { + return NewAppError("RemoteCluster.IsValid", "model.cluster.is_valid.name.app_error", nil, "name="+rc.Name, http.StatusBadRequest) + } + + if rc.CreateAt == 0 { + return NewAppError("RemoteCluster.IsValid", "model.cluster.is_valid.create_at.app_error", nil, "create_at=0", http.StatusBadRequest) + } + + if !IsValidId(rc.CreatorId) { + return NewAppError("RemoteCluster.IsValid", "model.cluster.is_valid.id.app_error", nil, "creator_id="+rc.CreatorId, http.StatusBadRequest) + } + return nil +} + +func IsValidRemoteName(s string) bool { + if len(s) < RemoteNameMinLength || len(s) > RemoteNameMaxLength { + return false + } + return validRemoteNameChars.MatchString(s) +} + +func (rc *RemoteCluster) PreUpdate() { + if rc.DisplayName == "" { + rc.DisplayName = rc.Name + } + + rc.Name = SanitizeUnicode(rc.Name) + rc.DisplayName = SanitizeUnicode(rc.DisplayName) + rc.Name = NormalizeRemoteName(rc.Name) + rc.fixTopics() +} + +func (rc *RemoteCluster) IsOnline() bool { + return rc.LastPingAt > GetMillis()-RemoteOfflineAfterMillis +} + +// fixTopics ensures all topics are separated by one, and only one, space. +func (rc *RemoteCluster) fixTopics() { + trimmed := strings.TrimSpace(rc.Topics) + if trimmed == "" || trimmed == "*" { + rc.Topics = trimmed + return + } + + var sb strings.Builder + sb.WriteString(" ") + + ss := strings.Split(rc.Topics, " ") + for _, c := range ss { + cc := strings.TrimSpace(c) + if cc != "" { + sb.WriteString(cc) + sb.WriteString(" ") + } + } + rc.Topics = sb.String() +} + +func (rc *RemoteCluster) ToRemoteClusterInfo() RemoteClusterInfo { + return RemoteClusterInfo{ + Name: rc.Name, + DisplayName: rc.DisplayName, + CreateAt: rc.CreateAt, + LastPingAt: rc.LastPingAt, + } +} + +func NormalizeRemoteName(name string) string { + return strings.ToLower(name) +} + +// RemoteClusterInfo provides a subset of RemoteCluster fields suitable for sending to clients. 
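A concrete illustration of the remote-name helpers above (inputs invented): IsValidRemoteName admits 1 to 64 characters drawn from letters, digits, dot, dash, and underscore, and PreSave lowercases names through NormalizeRemoteName.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	fmt.Println(model.IsValidRemoteName("Cluster-A.prod_1")) // true
	fmt.Println(model.IsValidRemoteName("bad name!"))        // false: space and '!'
	fmt.Println(model.NormalizeRemoteName("Cluster-A"))      // "cluster-a"
}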
+type RemoteClusterInfo struct { + Name string `json:"name"` + DisplayName string `json:"display_name"` + CreateAt int64 `json:"create_at"` + LastPingAt int64 `json:"last_ping_at"` +} + +// RemoteClusterFrame wraps a `RemoteClusterMsg` with credentials specific to a remote cluster. +type RemoteClusterFrame struct { + RemoteId string `json:"remote_id"` + Msg RemoteClusterMsg `json:"msg"` +} + +func (f *RemoteClusterFrame) IsValid() *AppError { + if !IsValidId(f.RemoteId) { + return NewAppError("RemoteClusterFrame.IsValid", "api.remote_cluster.invalid_id.app_error", nil, "RemoteId="+f.RemoteId, http.StatusBadRequest) + } + + if err := f.Msg.IsValid(); err != nil { + return err + } + + return nil +} + +// RemoteClusterMsg represents a message that is sent and received between clusters. +// These are processed and routed via the RemoteClusters service. +type RemoteClusterMsg struct { + Id string `json:"id"` + Topic string `json:"topic"` + CreateAt int64 `json:"create_at"` + Payload json.RawMessage `json:"payload"` +} + +func NewRemoteClusterMsg(topic string, payload json.RawMessage) RemoteClusterMsg { + return RemoteClusterMsg{ + Id: NewId(), + Topic: topic, + CreateAt: GetMillis(), + Payload: payload, + } +} + +func (m RemoteClusterMsg) IsValid() *AppError { + if !IsValidId(m.Id) { + return NewAppError("RemoteClusterMsg.IsValid", "api.remote_cluster.invalid_id.app_error", nil, "Id="+m.Id, http.StatusBadRequest) + } + + if m.Topic == "" { + return NewAppError("RemoteClusterMsg.IsValid", "api.remote_cluster.invalid_topic.app_error", nil, "Topic empty", http.StatusBadRequest) + } + + if len(m.Payload) == 0 { + return NewAppError("RemoteClusterMsg.IsValid", "api.context.invalid_body_param.app_error", map[string]interface{}{"Name": "PayLoad"}, "", http.StatusBadRequest) + } + + return nil +} + +// RemoteClusterPing represents a ping that is sent and received between clusters +// to indicate a connection is alive. This is the payload for a `RemoteClusterMsg`. +type RemoteClusterPing struct { + SentAt int64 `json:"sent_at"` + RecvAt int64 `json:"recv_at"` +} + +// RemoteClusterInvite represents an invitation to establish a simple trust with a remote cluster. +type RemoteClusterInvite struct { + RemoteId string `json:"remote_id"` + RemoteTeamId string `json:"remote_team_id"` + SiteURL string `json:"site_url"` + Token string `json:"token"` +} + +func (rci *RemoteClusterInvite) Encrypt(password string) ([]byte, error) { + raw, err := json.Marshal(&rci) + if err != nil { + return nil, err + } + + // create random salt to be prepended to the blob. + salt := make([]byte, 16) + if _, err = io.ReadFull(rand.Reader, salt); err != nil { + return nil, err + } + + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 1, 32) + if err != nil { + return nil, err + } + + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + // create random nonce + nonce := make([]byte, gcm.NonceSize()) + if _, err = io.ReadFull(rand.Reader, nonce); err != nil { + return nil, err + } + + // prefix the nonce to the cyphertext so we don't need to keep track of it. 
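	// (Annotation, not part of the upstream file: gcm.Seal with dst == nonce below
	// yields nonce||ciphertext, and the final append prepends the 16-byte scrypt
	// salt, so the stored blob is salt || nonce || ciphertext, exactly the order
	// Decrypt peels off again.)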
+ sealed := gcm.Seal(nonce, nonce, raw, nil) + + return append(salt, sealed...), nil +} + +func (rci *RemoteClusterInvite) Decrypt(encrypted []byte, password string) error { + if len(encrypted) <= 16 { + return errors.New("invalid length") + } + + // first 16 bytes is the salt that was used to derive a key + salt := encrypted[:16] + encrypted = encrypted[16:] + + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 1, 32) + if err != nil { + return err + } + + block, err := aes.NewCipher(key[:]) + if err != nil { + return err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return err + } + + // nonce was prefixed to the cyphertext when encrypting so we need to extract it. + nonceSize := gcm.NonceSize() + nonce, cyphertext := encrypted[:nonceSize], encrypted[nonceSize:] + + plain, err := gcm.Open(nil, nonce, cyphertext, nil) + if err != nil { + return err + } + + // try to unmarshall the decrypted JSON to this invite struct. + return json.Unmarshal(plain, &rci) +} + +// RemoteClusterQueryFilter provides filter criteria for RemoteClusterStore.GetAll +type RemoteClusterQueryFilter struct { + ExcludeOffline bool + InChannel string + NotInChannel string + Topic string + CreatorId string + OnlyConfirmed bool +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/role.go b/vendor/github.com/mattermost/mattermost-server/v6/model/role.go new file mode 100644 index 00000000..081e7b68 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/role.go @@ -0,0 +1,1023 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "fmt" + "strings" +) + +// SysconsoleAncillaryPermissions maps the non-sysconsole permissions required by each sysconsole view. +var SysconsoleAncillaryPermissions map[string][]*Permission +var SystemManagerDefaultPermissions []string +var SystemUserManagerDefaultPermissions []string +var SystemReadOnlyAdminDefaultPermissions []string + +var BuiltInSchemeManagedRoleIDs []string + +var NewSystemRoleIDs []string + +func init() { + NewSystemRoleIDs = []string{ + SystemUserManagerRoleId, + SystemReadOnlyAdminRoleId, + SystemManagerRoleId, + } + + BuiltInSchemeManagedRoleIDs = append([]string{ + SystemGuestRoleId, + SystemUserRoleId, + SystemAdminRoleId, + SystemPostAllRoleId, + SystemPostAllPublicRoleId, + SystemUserAccessTokenRoleId, + + TeamGuestRoleId, + TeamUserRoleId, + TeamAdminRoleId, + TeamPostAllRoleId, + TeamPostAllPublicRoleId, + + ChannelGuestRoleId, + ChannelUserRoleId, + ChannelAdminRoleId, + + CustomGroupUserRoleId, + + PlaybookAdminRoleId, + PlaybookMemberRoleId, + RunAdminRoleId, + RunMemberRoleId, + }, NewSystemRoleIDs...) + + // When updating the values here, the values in mattermost-redux must also be updated. 
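Encrypt and Decrypt above are symmetric, so a round trip is the clearest check; the password and field values here are invented, and only the RemoteClusterInvite API itself comes from the diff. The scrypt parameters used above (N=32768, r=8, p=1, 32-byte key) imply AES-256-GCM.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	invite := &model.RemoteClusterInvite{
		RemoteId: model.NewId(),
		SiteURL:  "https://cluster-b.example.com",
		Token:    model.NewId(),
	}

	blob, err := invite.Encrypt("correct horse battery staple")
	if err != nil {
		panic(err)
	}

	var decoded model.RemoteClusterInvite
	if err := decoded.Decrypt(blob, "correct horse battery staple"); err != nil {
		panic(err) // a wrong password surfaces here as a GCM open failure
	}
	fmt.Println(decoded.SiteURL == invite.SiteURL) // true
}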
+ SysconsoleAncillaryPermissions = map[string][]*Permission{ + PermissionSysconsoleReadAboutEditionAndLicense.Id: { + PermissionReadLicenseInformation, + }, + PermissionSysconsoleWriteAboutEditionAndLicense.Id: { + PermissionManageLicenseInformation, + }, + PermissionSysconsoleReadUserManagementChannels.Id: { + PermissionReadPublicChannel, + PermissionReadChannel, + PermissionReadPublicChannelGroups, + PermissionReadPrivateChannelGroups, + }, + PermissionSysconsoleReadUserManagementUsers.Id: { + PermissionReadOtherUsersTeams, + PermissionGetAnalytics, + }, + PermissionSysconsoleReadUserManagementTeams.Id: { + PermissionListPrivateTeams, + PermissionListPublicTeams, + PermissionViewTeam, + }, + PermissionSysconsoleReadEnvironmentElasticsearch.Id: { + PermissionReadElasticsearchPostIndexingJob, + PermissionReadElasticsearchPostAggregationJob, + }, + PermissionSysconsoleWriteEnvironmentWebServer.Id: { + PermissionTestSiteURL, + PermissionReloadConfig, + PermissionInvalidateCaches, + }, + PermissionSysconsoleWriteEnvironmentDatabase.Id: { + PermissionRecycleDatabaseConnections, + }, + PermissionSysconsoleWriteEnvironmentElasticsearch.Id: { + PermissionTestElasticsearch, + PermissionCreateElasticsearchPostIndexingJob, + PermissionCreateElasticsearchPostAggregationJob, + PermissionPurgeElasticsearchIndexes, + }, + PermissionSysconsoleWriteEnvironmentFileStorage.Id: { + PermissionTestS3, + }, + PermissionSysconsoleWriteEnvironmentSMTP.Id: { + PermissionTestEmail, + }, + PermissionSysconsoleReadReportingServerLogs.Id: { + PermissionGetLogs, + }, + PermissionSysconsoleReadReportingSiteStatistics.Id: { + PermissionGetAnalytics, + }, + PermissionSysconsoleReadReportingTeamStatistics.Id: { + PermissionViewTeam, + }, + PermissionSysconsoleWriteUserManagementUsers.Id: { + PermissionEditOtherUsers, + PermissionDemoteToGuest, + PermissionPromoteGuest, + }, + PermissionSysconsoleWriteUserManagementChannels.Id: { + PermissionManageTeam, + PermissionManagePublicChannelProperties, + PermissionManagePrivateChannelProperties, + PermissionManagePrivateChannelMembers, + PermissionManagePublicChannelMembers, + PermissionDeletePrivateChannel, + PermissionDeletePublicChannel, + PermissionManageChannelRoles, + PermissionConvertPublicChannelToPrivate, + PermissionConvertPrivateChannelToPublic, + }, + PermissionSysconsoleWriteUserManagementTeams.Id: { + PermissionManageTeam, + PermissionManageTeamRoles, + PermissionRemoveUserFromTeam, + PermissionJoinPrivateTeams, + PermissionJoinPublicTeams, + PermissionAddUserToTeam, + }, + PermissionSysconsoleWriteUserManagementGroups.Id: { + PermissionManageTeam, + PermissionManagePrivateChannelMembers, + PermissionManagePublicChannelMembers, + PermissionConvertPublicChannelToPrivate, + PermissionConvertPrivateChannelToPublic, + }, + PermissionSysconsoleWriteSiteCustomization.Id: { + PermissionEditBrand, + }, + PermissionSysconsoleWriteComplianceDataRetentionPolicy.Id: { + PermissionCreateDataRetentionJob, + }, + PermissionSysconsoleReadComplianceDataRetentionPolicy.Id: { + PermissionReadDataRetentionJob, + }, + PermissionSysconsoleWriteComplianceComplianceExport.Id: { + PermissionCreateComplianceExportJob, + PermissionDownloadComplianceExportResult, + }, + PermissionSysconsoleReadComplianceComplianceExport.Id: { + PermissionReadComplianceExportJob, + PermissionDownloadComplianceExportResult, + }, + PermissionSysconsoleReadComplianceCustomTermsOfService.Id: { + PermissionReadAudits, + }, + PermissionSysconsoleWriteExperimentalBleve.Id: { + PermissionCreatePostBleveIndexesJob, + 
PermissionPurgeBleveIndexes, + }, + PermissionSysconsoleWriteAuthenticationLdap.Id: { + PermissionCreateLdapSyncJob, + PermissionAddLdapPublicCert, + PermissionRemoveLdapPublicCert, + PermissionAddLdapPrivateCert, + PermissionRemoveLdapPrivateCert, + }, + PermissionSysconsoleReadAuthenticationLdap.Id: { + PermissionTestLdap, + PermissionReadLdapSyncJob, + }, + PermissionSysconsoleWriteAuthenticationEmail.Id: { + PermissionInvalidateEmailInvite, + }, + PermissionSysconsoleWriteAuthenticationSaml.Id: { + PermissionGetSamlMetadataFromIdp, + PermissionAddSamlPublicCert, + PermissionAddSamlPrivateCert, + PermissionAddSamlIdpCert, + PermissionRemoveSamlPublicCert, + PermissionRemoveSamlPrivateCert, + PermissionRemoveSamlIdpCert, + PermissionGetSamlCertStatus, + }, + } + + SystemUserManagerDefaultPermissions = []string{ + PermissionSysconsoleReadUserManagementGroups.Id, + PermissionSysconsoleReadUserManagementTeams.Id, + PermissionSysconsoleReadUserManagementChannels.Id, + PermissionSysconsoleReadUserManagementPermissions.Id, + PermissionSysconsoleWriteUserManagementGroups.Id, + PermissionSysconsoleWriteUserManagementTeams.Id, + PermissionSysconsoleWriteUserManagementChannels.Id, + PermissionSysconsoleReadAuthenticationSignup.Id, + PermissionSysconsoleReadAuthenticationEmail.Id, + PermissionSysconsoleReadAuthenticationPassword.Id, + PermissionSysconsoleReadAuthenticationMfa.Id, + PermissionSysconsoleReadAuthenticationLdap.Id, + PermissionSysconsoleReadAuthenticationSaml.Id, + PermissionSysconsoleReadAuthenticationOpenid.Id, + PermissionSysconsoleReadAuthenticationGuestAccess.Id, + } + + SystemReadOnlyAdminDefaultPermissions = []string{ + PermissionSysconsoleReadAboutEditionAndLicense.Id, + PermissionSysconsoleReadReportingSiteStatistics.Id, + PermissionSysconsoleReadReportingTeamStatistics.Id, + PermissionSysconsoleReadReportingServerLogs.Id, + PermissionSysconsoleReadUserManagementUsers.Id, + PermissionSysconsoleReadUserManagementGroups.Id, + PermissionSysconsoleReadUserManagementTeams.Id, + PermissionSysconsoleReadUserManagementChannels.Id, + PermissionSysconsoleReadUserManagementPermissions.Id, + PermissionSysconsoleReadEnvironmentWebServer.Id, + PermissionSysconsoleReadEnvironmentDatabase.Id, + PermissionSysconsoleReadEnvironmentElasticsearch.Id, + PermissionSysconsoleReadEnvironmentFileStorage.Id, + PermissionSysconsoleReadEnvironmentImageProxy.Id, + PermissionSysconsoleReadEnvironmentSMTP.Id, + PermissionSysconsoleReadEnvironmentPushNotificationServer.Id, + PermissionSysconsoleReadEnvironmentHighAvailability.Id, + PermissionSysconsoleReadEnvironmentRateLimiting.Id, + PermissionSysconsoleReadEnvironmentLogging.Id, + PermissionSysconsoleReadEnvironmentSessionLengths.Id, + PermissionSysconsoleReadEnvironmentPerformanceMonitoring.Id, + PermissionSysconsoleReadEnvironmentDeveloper.Id, + PermissionSysconsoleReadSiteCustomization.Id, + PermissionSysconsoleReadSiteLocalization.Id, + PermissionSysconsoleReadSiteUsersAndTeams.Id, + PermissionSysconsoleReadSiteNotifications.Id, + PermissionSysconsoleReadSiteAnnouncementBanner.Id, + PermissionSysconsoleReadSiteEmoji.Id, + PermissionSysconsoleReadSitePosts.Id, + PermissionSysconsoleReadSiteFileSharingAndDownloads.Id, + PermissionSysconsoleReadSitePublicLinks.Id, + PermissionSysconsoleReadSiteNotices.Id, + PermissionSysconsoleReadAuthenticationSignup.Id, + PermissionSysconsoleReadAuthenticationEmail.Id, + PermissionSysconsoleReadAuthenticationPassword.Id, + PermissionSysconsoleReadAuthenticationMfa.Id, + PermissionSysconsoleReadAuthenticationLdap.Id, + 
PermissionSysconsoleReadAuthenticationSaml.Id, + PermissionSysconsoleReadAuthenticationOpenid.Id, + PermissionSysconsoleReadAuthenticationGuestAccess.Id, + PermissionSysconsoleReadPlugins.Id, + PermissionSysconsoleReadIntegrationsIntegrationManagement.Id, + PermissionSysconsoleReadIntegrationsBotAccounts.Id, + PermissionSysconsoleReadIntegrationsGif.Id, + PermissionSysconsoleReadIntegrationsCors.Id, + PermissionSysconsoleReadComplianceDataRetentionPolicy.Id, + PermissionSysconsoleReadComplianceComplianceExport.Id, + PermissionSysconsoleReadComplianceComplianceMonitoring.Id, + PermissionSysconsoleReadComplianceCustomTermsOfService.Id, + PermissionSysconsoleReadExperimentalFeatures.Id, + PermissionSysconsoleReadExperimentalFeatureFlags.Id, + PermissionSysconsoleReadExperimentalBleve.Id, + } + + SystemManagerDefaultPermissions = []string{ + PermissionSysconsoleReadAboutEditionAndLicense.Id, + PermissionSysconsoleReadReportingSiteStatistics.Id, + PermissionSysconsoleReadReportingTeamStatistics.Id, + PermissionSysconsoleReadReportingServerLogs.Id, + PermissionSysconsoleReadUserManagementGroups.Id, + PermissionSysconsoleReadUserManagementTeams.Id, + PermissionSysconsoleReadUserManagementChannels.Id, + PermissionSysconsoleReadUserManagementPermissions.Id, + PermissionSysconsoleWriteUserManagementGroups.Id, + PermissionSysconsoleWriteUserManagementTeams.Id, + PermissionSysconsoleWriteUserManagementChannels.Id, + PermissionSysconsoleWriteUserManagementPermissions.Id, + PermissionSysconsoleReadEnvironmentWebServer.Id, + PermissionSysconsoleReadEnvironmentDatabase.Id, + PermissionSysconsoleReadEnvironmentElasticsearch.Id, + PermissionSysconsoleReadEnvironmentFileStorage.Id, + PermissionSysconsoleReadEnvironmentImageProxy.Id, + PermissionSysconsoleReadEnvironmentSMTP.Id, + PermissionSysconsoleReadEnvironmentPushNotificationServer.Id, + PermissionSysconsoleReadEnvironmentHighAvailability.Id, + PermissionSysconsoleReadEnvironmentRateLimiting.Id, + PermissionSysconsoleReadEnvironmentLogging.Id, + PermissionSysconsoleReadEnvironmentSessionLengths.Id, + PermissionSysconsoleReadEnvironmentPerformanceMonitoring.Id, + PermissionSysconsoleReadEnvironmentDeveloper.Id, + PermissionSysconsoleWriteEnvironmentWebServer.Id, + PermissionSysconsoleWriteEnvironmentDatabase.Id, + PermissionSysconsoleWriteEnvironmentElasticsearch.Id, + PermissionSysconsoleWriteEnvironmentFileStorage.Id, + PermissionSysconsoleWriteEnvironmentImageProxy.Id, + PermissionSysconsoleWriteEnvironmentSMTP.Id, + PermissionSysconsoleWriteEnvironmentPushNotificationServer.Id, + PermissionSysconsoleWriteEnvironmentHighAvailability.Id, + PermissionSysconsoleWriteEnvironmentRateLimiting.Id, + PermissionSysconsoleWriteEnvironmentLogging.Id, + PermissionSysconsoleWriteEnvironmentSessionLengths.Id, + PermissionSysconsoleWriteEnvironmentPerformanceMonitoring.Id, + PermissionSysconsoleWriteEnvironmentDeveloper.Id, + PermissionSysconsoleReadSiteCustomization.Id, + PermissionSysconsoleWriteSiteCustomization.Id, + PermissionSysconsoleReadSiteLocalization.Id, + PermissionSysconsoleWriteSiteLocalization.Id, + PermissionSysconsoleReadSiteUsersAndTeams.Id, + PermissionSysconsoleWriteSiteUsersAndTeams.Id, + PermissionSysconsoleReadSiteNotifications.Id, + PermissionSysconsoleWriteSiteNotifications.Id, + PermissionSysconsoleReadSiteAnnouncementBanner.Id, + PermissionSysconsoleWriteSiteAnnouncementBanner.Id, + PermissionSysconsoleReadSiteEmoji.Id, + PermissionSysconsoleWriteSiteEmoji.Id, + PermissionSysconsoleReadSitePosts.Id, + PermissionSysconsoleWriteSitePosts.Id, 
+ PermissionSysconsoleReadSiteFileSharingAndDownloads.Id, + PermissionSysconsoleWriteSiteFileSharingAndDownloads.Id, + PermissionSysconsoleReadSitePublicLinks.Id, + PermissionSysconsoleWriteSitePublicLinks.Id, + PermissionSysconsoleReadSiteNotices.Id, + PermissionSysconsoleWriteSiteNotices.Id, + PermissionSysconsoleReadAuthenticationSignup.Id, + PermissionSysconsoleReadAuthenticationEmail.Id, + PermissionSysconsoleReadAuthenticationPassword.Id, + PermissionSysconsoleReadAuthenticationMfa.Id, + PermissionSysconsoleReadAuthenticationLdap.Id, + PermissionSysconsoleReadAuthenticationSaml.Id, + PermissionSysconsoleReadAuthenticationOpenid.Id, + PermissionSysconsoleReadAuthenticationGuestAccess.Id, + PermissionSysconsoleReadPlugins.Id, + PermissionSysconsoleReadIntegrationsIntegrationManagement.Id, + PermissionSysconsoleReadIntegrationsBotAccounts.Id, + PermissionSysconsoleReadIntegrationsGif.Id, + PermissionSysconsoleReadIntegrationsCors.Id, + PermissionSysconsoleWriteIntegrationsIntegrationManagement.Id, + PermissionSysconsoleWriteIntegrationsBotAccounts.Id, + PermissionSysconsoleWriteIntegrationsGif.Id, + PermissionSysconsoleWriteIntegrationsCors.Id, + } + + // Add the ancillary permissions to each system role + SystemUserManagerDefaultPermissions = AddAncillaryPermissions(SystemUserManagerDefaultPermissions) + SystemReadOnlyAdminDefaultPermissions = AddAncillaryPermissions(SystemReadOnlyAdminDefaultPermissions) + SystemManagerDefaultPermissions = AddAncillaryPermissions(SystemManagerDefaultPermissions) +} + +type RoleType string +type RoleScope string + +const ( + SystemGuestRoleId = "system_guest" + SystemUserRoleId = "system_user" + SystemAdminRoleId = "system_admin" + SystemPostAllRoleId = "system_post_all" + SystemPostAllPublicRoleId = "system_post_all_public" + SystemUserAccessTokenRoleId = "system_user_access_token" + SystemUserManagerRoleId = "system_user_manager" + SystemReadOnlyAdminRoleId = "system_read_only_admin" + SystemManagerRoleId = "system_manager" + + TeamGuestRoleId = "team_guest" + TeamUserRoleId = "team_user" + TeamAdminRoleId = "team_admin" + TeamPostAllRoleId = "team_post_all" + TeamPostAllPublicRoleId = "team_post_all_public" + + ChannelGuestRoleId = "channel_guest" + ChannelUserRoleId = "channel_user" + ChannelAdminRoleId = "channel_admin" + + CustomGroupUserRoleId = "custom_group_user" + + PlaybookAdminRoleId = "playbook_admin" + PlaybookMemberRoleId = "playbook_member" + RunAdminRoleId = "run_admin" + RunMemberRoleId = "run_member" + + RoleNameMaxLength = 64 + RoleDisplayNameMaxLength = 128 + RoleDescriptionMaxLength = 1024 + + RoleScopeSystem RoleScope = "System" + RoleScopeTeam RoleScope = "Team" + RoleScopeChannel RoleScope = "Channel" + RoleScopeGroup RoleScope = "Group" + + RoleTypeGuest RoleType = "Guest" + RoleTypeUser RoleType = "User" + RoleTypeAdmin RoleType = "Admin" +) + +type Role struct { + Id string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Permissions []string `json:"permissions"` + SchemeManaged bool `json:"scheme_managed"` + BuiltIn bool `json:"built_in"` +} + +type RolePatch struct { + Permissions *[]string `json:"permissions"` +} + +type RolePermissions struct { + RoleID string + Permissions []string +} + +func (r *Role) Patch(patch *RolePatch) { + if patch.Permissions != nil { + r.Permissions = *patch.Permissions + } +} + +// 
MergeChannelHigherScopedPermissions is meant to be invoked on a channel scheme's role and merges the higher-scoped +// channel role's permissions. +func (r *Role) MergeChannelHigherScopedPermissions(higherScopedPermissions *RolePermissions) { + mergedPermissions := []string{} + + higherScopedPermissionsMap := asStringBoolMap(higherScopedPermissions.Permissions) + rolePermissionsMap := asStringBoolMap(r.Permissions) + + for _, cp := range AllPermissions { + if cp.Scope != PermissionScopeChannel { + continue + } + + _, presentOnHigherScope := higherScopedPermissionsMap[cp.Id] + + // For the channel admin role always look to the higher scope to determine if the role has their permission. + // The channel admin is a special case because they're not part of the UI to be "channel moderated", only + // channel members and channel guests are. + if higherScopedPermissions.RoleID == ChannelAdminRoleId && presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + continue + } + + _, permissionIsModerated := ChannelModeratedPermissionsMap[cp.Id] + if permissionIsModerated { + _, presentOnRole := rolePermissionsMap[cp.Id] + if presentOnRole && presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + } + } else { + if presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + } + } + } + + r.Permissions = mergedPermissions +} + +// Returns an array of permissions that are in either role.Permissions +// or patch.Permissions, but not both. +func PermissionsChangedByPatch(role *Role, patch *RolePatch) []string { + var result []string + + if patch.Permissions == nil { + return result + } + + roleMap := make(map[string]bool) + patchMap := make(map[string]bool) + + for _, permission := range role.Permissions { + roleMap[permission] = true + } + + for _, permission := range *patch.Permissions { + patchMap[permission] = true + } + + for _, permission := range role.Permissions { + if !patchMap[permission] { + result = append(result, permission) + } + } + + for _, permission := range *patch.Permissions { + if !roleMap[permission] { + result = append(result, permission) + } + } + + return result +} + +func ChannelModeratedPermissionsChangedByPatch(role *Role, patch *RolePatch) []string { + var result []string + + if role == nil { + return result + } + + if patch.Permissions == nil { + return result + } + + roleMap := make(map[string]bool) + patchMap := make(map[string]bool) + + for _, permission := range role.Permissions { + if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found { + roleMap[channelModeratedPermissionName] = true + } + } + + for _, permission := range *patch.Permissions { + if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found { + patchMap[channelModeratedPermissionName] = true + } + } + + for permissionKey := range roleMap { + if !patchMap[permissionKey] { + result = append(result, permissionKey) + } + } + + for permissionKey := range patchMap { + if !roleMap[permissionKey] { + result = append(result, permissionKey) + } + } + + return result +} + +// GetChannelModeratedPermissions returns a map of channel moderated permissions that the role has access to +func (r *Role) GetChannelModeratedPermissions(channelType ChannelType) map[string]bool { + moderatedPermissions := make(map[string]bool) + for _, permission := range r.Permissions { + if _, found := ChannelModeratedPermissionsMap[permission]; !found { + continue + } + + for moderated, 
moderatedPermissionValue := range ChannelModeratedPermissionsMap { + // the moderated permission has already been found to be true so skip this iteration + if moderatedPermissions[moderatedPermissionValue] { + continue + } + + if moderated == permission { + // Special case where the channel moderated permission for `manage_members` is different depending on whether the channel is private or public + if moderated == PermissionManagePublicChannelMembers.Id || moderated == PermissionManagePrivateChannelMembers.Id { + canManagePublic := channelType == ChannelTypeOpen && moderated == PermissionManagePublicChannelMembers.Id + canManagePrivate := channelType == ChannelTypePrivate && moderated == PermissionManagePrivateChannelMembers.Id + moderatedPermissions[moderatedPermissionValue] = canManagePublic || canManagePrivate + } else { + moderatedPermissions[moderatedPermissionValue] = true + } + } + } + } + + return moderatedPermissions +} + +// RolePatchFromChannelModerationsPatch Creates and returns a RolePatch based on a slice of ChannelModerationPatches, roleName is expected to be either "members" or "guests". +func (r *Role) RolePatchFromChannelModerationsPatch(channelModerationsPatch []*ChannelModerationPatch, roleName string) *RolePatch { + permissionsToAddToPatch := make(map[string]bool) + + // Iterate through the list of existing permissions on the role and append permissions that we want to keep. + for _, permission := range r.Permissions { + // Permission is not moderated so dont add it to the patch and skip the channelModerationsPatch + if _, isModerated := ChannelModeratedPermissionsMap[permission]; !isModerated { + continue + } + + permissionEnabled := true + // Check if permission has a matching moderated permission name inside the channel moderation patch + for _, channelModerationPatch := range channelModerationsPatch { + if *channelModerationPatch.Name == ChannelModeratedPermissionsMap[permission] { + // Permission key exists in patch with a value of false so skip over it + if roleName == "members" { + if channelModerationPatch.Roles.Members != nil && !*channelModerationPatch.Roles.Members { + permissionEnabled = false + } + } else if roleName == "guests" { + if channelModerationPatch.Roles.Guests != nil && !*channelModerationPatch.Roles.Guests { + permissionEnabled = false + } + } + } + } + + if permissionEnabled { + permissionsToAddToPatch[permission] = true + } + } + + // Iterate through the patch and add any permissions that dont already exist on the role + for _, channelModerationPatch := range channelModerationsPatch { + for permission, moderatedPermissionName := range ChannelModeratedPermissionsMap { + if roleName == "members" && channelModerationPatch.Roles.Members != nil && *channelModerationPatch.Roles.Members && *channelModerationPatch.Name == moderatedPermissionName { + permissionsToAddToPatch[permission] = true + } + + if roleName == "guests" && channelModerationPatch.Roles.Guests != nil && *channelModerationPatch.Roles.Guests && *channelModerationPatch.Name == moderatedPermissionName { + permissionsToAddToPatch[permission] = true + } + } + } + + patchPermissions := make([]string, 0, len(permissionsToAddToPatch)) + for permission := range permissionsToAddToPatch { + patchPermissions = append(patchPermissions, permission) + } + + return &RolePatch{Permissions: &patchPermissions} +} + +func (r *Role) IsValid() bool { + if !IsValidId(r.Id) { + return false + } + + return r.IsValidWithoutId() +} + +func (r *Role) IsValidWithoutId() bool { + if !IsValidRoleName(r.Name) { + 
return false + } + + if r.DisplayName == "" || len(r.DisplayName) > RoleDisplayNameMaxLength { + return false + } + + if len(r.Description) > RoleDescriptionMaxLength { + return false + } + + check := func(perms []*Permission, permission string) bool { + for _, p := range perms { + if permission == p.Id { + return true + } + } + return false + } + for _, permission := range r.Permissions { + permissionValidated := check(AllPermissions, permission) || check(DeprecatedPermissions, permission) + if !permissionValidated { + return false + } + } + + return true +} + +func CleanRoleNames(roleNames []string) ([]string, bool) { + var cleanedRoleNames []string + for _, roleName := range roleNames { + if strings.TrimSpace(roleName) == "" { + continue + } + + if !IsValidRoleName(roleName) { + return roleNames, false + } + + cleanedRoleNames = append(cleanedRoleNames, roleName) + } + + return cleanedRoleNames, true +} + +func IsValidRoleName(roleName string) bool { + if roleName == "" || len(roleName) > RoleNameMaxLength { + return false + } + + if strings.TrimLeft(roleName, "abcdefghijklmnopqrstuvwxyz0123456789_") != "" { + return false + } + + return true +} + +func MakeDefaultRoles() map[string]*Role { + roles := make(map[string]*Role) + + roles[CustomGroupUserRoleId] = &Role{ + Name: CustomGroupUserRoleId, + DisplayName: fmt.Sprintf("authentication.roles.%s.name", CustomGroupUserRoleId), + Description: fmt.Sprintf("authentication.roles.%s.description", CustomGroupUserRoleId), + Permissions: []string{}, + } + + roles[ChannelGuestRoleId] = &Role{ + Name: "channel_guest", + DisplayName: "authentication.roles.channel_guest.name", + Description: "authentication.roles.channel_guest.description", + Permissions: []string{ + PermissionReadChannel.Id, + PermissionAddReaction.Id, + PermissionRemoveReaction.Id, + PermissionUploadFile.Id, + PermissionEditPost.Id, + PermissionCreatePost.Id, + PermissionUseChannelMentions.Id, + PermissionUseSlashCommands.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[ChannelUserRoleId] = &Role{ + Name: "channel_user", + DisplayName: "authentication.roles.channel_user.name", + Description: "authentication.roles.channel_user.description", + Permissions: []string{ + PermissionReadChannel.Id, + PermissionAddReaction.Id, + PermissionRemoveReaction.Id, + PermissionManagePublicChannelMembers.Id, + PermissionUploadFile.Id, + PermissionGetPublicLink.Id, + PermissionCreatePost.Id, + PermissionUseChannelMentions.Id, + PermissionUseSlashCommands.Id, + PermissionManagePublicChannelProperties.Id, + PermissionDeletePublicChannel.Id, + PermissionManagePrivateChannelProperties.Id, + PermissionDeletePrivateChannel.Id, + PermissionManagePrivateChannelMembers.Id, + PermissionDeletePost.Id, + PermissionEditPost.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[ChannelAdminRoleId] = &Role{ + Name: "channel_admin", + DisplayName: "authentication.roles.channel_admin.name", + Description: "authentication.roles.channel_admin.description", + Permissions: []string{ + PermissionManageChannelRoles.Id, + PermissionUseGroupMentions.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TeamGuestRoleId] = &Role{ + Name: "team_guest", + DisplayName: "authentication.roles.team_guest.name", + Description: "authentication.roles.team_guest.description", + Permissions: []string{ + PermissionViewTeam.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TeamUserRoleId] = &Role{ + Name: "team_user", + DisplayName: "authentication.roles.team_user.name", + Description: 
"authentication.roles.team_user.description", + Permissions: []string{ + PermissionListTeamChannels.Id, + PermissionJoinPublicChannels.Id, + PermissionReadPublicChannel.Id, + PermissionViewTeam.Id, + PermissionCreatePublicChannel.Id, + PermissionCreatePrivateChannel.Id, + PermissionInviteUser.Id, + PermissionAddUserToTeam.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TeamPostAllRoleId] = &Role{ + Name: "team_post_all", + DisplayName: "authentication.roles.team_post_all.name", + Description: "authentication.roles.team_post_all.description", + Permissions: []string{ + PermissionCreatePost.Id, + PermissionUseChannelMentions.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[TeamPostAllPublicRoleId] = &Role{ + Name: "team_post_all_public", + DisplayName: "authentication.roles.team_post_all_public.name", + Description: "authentication.roles.team_post_all_public.description", + Permissions: []string{ + PermissionCreatePostPublic.Id, + PermissionUseChannelMentions.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[TeamAdminRoleId] = &Role{ + Name: "team_admin", + DisplayName: "authentication.roles.team_admin.name", + Description: "authentication.roles.team_admin.description", + Permissions: []string{ + PermissionRemoveUserFromTeam.Id, + PermissionManageTeam.Id, + PermissionImportTeam.Id, + PermissionManageTeamRoles.Id, + PermissionManageChannelRoles.Id, + PermissionManageOthersIncomingWebhooks.Id, + PermissionManageOthersOutgoingWebhooks.Id, + PermissionManageSlashCommands.Id, + PermissionManageOthersSlashCommands.Id, + PermissionManageIncomingWebhooks.Id, + PermissionManageOutgoingWebhooks.Id, + PermissionConvertPublicChannelToPrivate.Id, + PermissionConvertPrivateChannelToPublic.Id, + PermissionDeletePost.Id, + PermissionDeleteOthersPosts.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[PlaybookAdminRoleId] = &Role{ + Name: PlaybookAdminRoleId, + DisplayName: "authentication.roles.playbook_admin.name", + Description: "authentication.roles.playbook_admin.description", + Permissions: []string{ + PermissionPublicPlaybookManageMembers.Id, + PermissionPublicPlaybookManageRoles.Id, + PermissionPublicPlaybookManageProperties.Id, + PermissionPrivatePlaybookManageMembers.Id, + PermissionPrivatePlaybookManageRoles.Id, + PermissionPrivatePlaybookManageProperties.Id, + PermissionPublicPlaybookMakePrivate.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[PlaybookMemberRoleId] = &Role{ + Name: PlaybookMemberRoleId, + DisplayName: "authentication.roles.playbook_member.name", + Description: "authentication.roles.playbook_member.description", + Permissions: []string{ + PermissionPublicPlaybookView.Id, + PermissionPublicPlaybookManageMembers.Id, + PermissionPublicPlaybookManageProperties.Id, + PermissionPrivatePlaybookView.Id, + PermissionPrivatePlaybookManageMembers.Id, + PermissionPrivatePlaybookManageProperties.Id, + PermissionRunCreate.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[RunAdminRoleId] = &Role{ + Name: RunAdminRoleId, + DisplayName: "authentication.roles.run_admin.name", + Description: "authentication.roles.run_admin.description", + Permissions: []string{ + PermissionRunManageMembers.Id, + PermissionRunManageProperties.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[RunMemberRoleId] = &Role{ + Name: RunMemberRoleId, + DisplayName: "authentication.roles.run_member.name", + Description: "authentication.roles.run_member.description", + Permissions: []string{ + PermissionRunView.Id, + }, + 
SchemeManaged: true, + BuiltIn: true, + } + + roles[SystemGuestRoleId] = &Role{ + Name: "system_guest", + DisplayName: "authentication.roles.global_guest.name", + Description: "authentication.roles.global_guest.description", + Permissions: []string{ + PermissionCreateDirectChannel.Id, + PermissionCreateGroupChannel.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[SystemUserRoleId] = &Role{ + Name: "system_user", + DisplayName: "authentication.roles.global_user.name", + Description: "authentication.roles.global_user.description", + Permissions: []string{ + PermissionListPublicTeams.Id, + PermissionJoinPublicTeams.Id, + PermissionCreateDirectChannel.Id, + PermissionCreateGroupChannel.Id, + PermissionViewMembers.Id, + PermissionCreateTeam.Id, + PermissionCreateCustomGroup.Id, + PermissionEditCustomGroup.Id, + PermissionDeleteCustomGroup.Id, + PermissionManageCustomGroupMembers.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[SystemPostAllRoleId] = &Role{ + Name: "system_post_all", + DisplayName: "authentication.roles.system_post_all.name", + Description: "authentication.roles.system_post_all.description", + Permissions: []string{ + PermissionCreatePost.Id, + PermissionUseChannelMentions.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SystemPostAllPublicRoleId] = &Role{ + Name: "system_post_all_public", + DisplayName: "authentication.roles.system_post_all_public.name", + Description: "authentication.roles.system_post_all_public.description", + Permissions: []string{ + PermissionCreatePostPublic.Id, + PermissionUseChannelMentions.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SystemUserAccessTokenRoleId] = &Role{ + Name: "system_user_access_token", + DisplayName: "authentication.roles.system_user_access_token.name", + Description: "authentication.roles.system_user_access_token.description", + Permissions: []string{ + PermissionCreateUserAccessToken.Id, + PermissionReadUserAccessToken.Id, + PermissionRevokeUserAccessToken.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SystemUserManagerRoleId] = &Role{ + Name: "system_user_manager", + DisplayName: "authentication.roles.system_user_manager.name", + Description: "authentication.roles.system_user_manager.description", + Permissions: SystemUserManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SystemReadOnlyAdminRoleId] = &Role{ + Name: "system_read_only_admin", + DisplayName: "authentication.roles.system_read_only_admin.name", + Description: "authentication.roles.system_read_only_admin.description", + Permissions: SystemReadOnlyAdminDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SystemManagerRoleId] = &Role{ + Name: "system_manager", + DisplayName: "authentication.roles.system_manager.name", + Description: "authentication.roles.system_manager.description", + Permissions: SystemManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + allPermissionIDs := []string{} + for _, permission := range AllPermissions { + allPermissionIDs = append(allPermissionIDs, permission.Id) + } + + roles[SystemAdminRoleId] = &Role{ + Name: "system_admin", + DisplayName: "authentication.roles.global_admin.name", + Description: "authentication.roles.global_admin.description", + // System admins can do anything channel and team admins can do + // plus everything members of teams and channels can do to all teams + // and channels on the system + Permissions: allPermissionIDs, + SchemeManaged: true, + BuiltIn: true, + } + + 
return roles +} + +func AddAncillaryPermissions(permissions []string) []string { + for _, permission := range permissions { + if ancillaryPermissions, ok := SysconsoleAncillaryPermissions[permission]; ok { + for _, ancillaryPermission := range ancillaryPermissions { + permissions = append(permissions, ancillaryPermission.Id) + } + } + } + return permissions +} + +func asStringBoolMap(list []string) map[string]bool { + listMap := make(map[string]bool, len(list)) + for _, p := range list { + listMap[p] = true + } + return listMap +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go b/vendor/github.com/mattermost/mattermost-server/v6/model/saml.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/v5/model/saml.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/saml.go index feaf325a..e9e987d8 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/saml.go @@ -4,18 +4,16 @@ package model import ( - "encoding/json" "encoding/xml" - "io" "time" ) const ( - USER_AUTH_SERVICE_SAML = "saml" - USER_AUTH_SERVICE_SAML_TEXT = "SAML" - USER_AUTH_SERVICE_IS_SAML = "isSaml" - USER_AUTH_SERVICE_IS_MOBILE = "isMobile" - USER_AUTH_SERVICE_IS_OAUTH = "isOAuthUser" + UserAuthServiceSaml = "saml" + UserAuthServiceSamlText = "SAML" + UserAuthServiceIsSaml = "isSaml" + UserAuthServiceIsMobile = "isMobile" + UserAuthServiceIsOAuth = "isOAuthUser" ) type SamlAuthRequest struct { @@ -31,8 +29,8 @@ type SamlCertificateStatus struct { } type SamlMetadataResponse struct { - IdpDescriptorUrl string `json:"idp_descriptor_url"` - IdpUrl string `json:"idp_url"` + IdpDescriptorURL string `json:"idp_descriptor_url"` + IdpURL string `json:"idp_url"` IdpPublicCertificate string `json:"idp_public_certificate"` } @@ -176,25 +174,3 @@ type EntityDescriptor struct { Organization Organization `xml:"Organization"` ContactPerson ContactPerson `xml:"ContactPerson"` } - -func (s *SamlCertificateStatus) ToJson() string { - b, _ := json.Marshal(s) - return string(b) -} - -func SamlCertificateStatusFromJson(data io.Reader) *SamlCertificateStatus { - var status *SamlCertificateStatus - json.NewDecoder(data).Decode(&status) - return status -} - -func (s *SamlMetadataResponse) ToJson() string { - b, _ := json.Marshal(s) - return string(b) -} - -func SamlMetadataResponseFromJson(data io.Reader) *SamlMetadataResponse { - var status *SamlMetadataResponse - json.NewDecoder(data).Decode(&status) - return status -} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/scheduled_task.go b/vendor/github.com/mattermost/mattermost-server/v6/model/scheduled_task.go new file mode 100644 index 00000000..cf20db63 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/scheduled_task.go @@ -0,0 +1,100 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
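For orientation, here is a minimal usage sketch of the role helpers above. It is not part of the vendored diff; it only assumes the vendored import path github.com/mattermost/mattermost-server/v6/model shown in the file headers.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// MakeDefaultRoles returns the built-in role set keyed by role name.
	roles := model.MakeDefaultRoles()
	channelUser := roles[model.ChannelUserRoleId]
	fmt.Println(len(channelUser.Permissions), "permissions on channel_user")

	// AddAncillaryPermissions appends any system-console ancillary
	// permissions implied by the permissions already in the list.
	expanded := model.AddAncillaryPermissions(channelUser.Permissions)
	fmt.Println(len(expanded), "permissions after ancillary expansion")
}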
+ +package model + +import ( + "fmt" + "time" +) + +type TaskFunc func() + +type ScheduledTask struct { + Name string `json:"name"` + Interval time.Duration `json:"interval"` + Recurring bool `json:"recurring"` + function func() + cancel chan struct{} + cancelled chan struct{} + fromNextIntervalTime bool +} + +func CreateTask(name string, function TaskFunc, timeToExecution time.Duration) *ScheduledTask { + return createTask(name, function, timeToExecution, false, false) +} + +func CreateRecurringTask(name string, function TaskFunc, interval time.Duration) *ScheduledTask { + return createTask(name, function, interval, true, false) +} + +func CreateRecurringTaskFromNextIntervalTime(name string, function TaskFunc, interval time.Duration) *ScheduledTask { + return createTask(name, function, interval, true, true) +} + +func createTask(name string, function TaskFunc, interval time.Duration, recurring bool, fromNextIntervalTime bool) *ScheduledTask { + task := &ScheduledTask{ + Name: name, + Interval: interval, + Recurring: recurring, + function: function, + cancel: make(chan struct{}), + cancelled: make(chan struct{}), + fromNextIntervalTime: fromNextIntervalTime, + } + + go func() { + defer close(task.cancelled) + + var firstTick <-chan time.Time + var ticker *time.Ticker + + if task.fromNextIntervalTime { + currTime := time.Now() + first := currTime.Truncate(interval) + if first.Before(currTime) { + first = first.Add(interval) + } + firstTick = time.After(time.Until(first)) + ticker = &time.Ticker{C: nil} + } else { + firstTick = nil + ticker = time.NewTicker(interval) + } + defer func() { + ticker.Stop() + }() + + for { + select { + case <-firstTick: + ticker = time.NewTicker(interval) + function() + case <-ticker.C: + function() + case <-task.cancel: + return + } + + if !task.Recurring { + break + } + } + }() + + return task +} + +func (task *ScheduledTask) Cancel() { + close(task.cancel) + <-task.cancelled +} + +func (task *ScheduledTask) String() string { + return fmt.Sprintf( + "%s\nInterval: %s\nRecurring: %t\n", + task.Name, + task.Interval.String(), + task.Recurring, + ) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/scheme.go b/vendor/github.com/mattermost/mattermost-server/v6/model/scheme.go new file mode 100644 index 00000000..b2bc713c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/scheme.go @@ -0,0 +1,197 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
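A brief sketch of how the scheduled-task constructors above are typically driven (illustrative only; the task name, interval, and body are invented for the example):

package main

import (
	"fmt"
	"time"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// Fires roughly once per second until cancelled.
	task := model.CreateRecurringTask("heartbeat", func() {
		fmt.Println("tick")
	}, time.Second)

	time.Sleep(2500 * time.Millisecond)

	// Cancel closes the task's cancel channel and blocks until the
	// task goroutine has fully exited.
	task.Cancel()
	fmt.Println(task) // String() prints name, interval, and recurring flag
}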
+ +package model + +import ( + "fmt" + "regexp" +) + +const ( + SchemeDisplayNameMaxLength = 128 + SchemeNameMaxLength = 64 + SchemeDescriptionMaxLength = 1024 + SchemeScopeTeam = "team" + SchemeScopeChannel = "channel" + SchemeScopePlaybook = "playbook" + SchemeScopeRun = "run" +) + +type Scheme struct { + Id string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Scope string `json:"scope"` + DefaultTeamAdminRole string `json:"default_team_admin_role"` + DefaultTeamUserRole string `json:"default_team_user_role"` + DefaultChannelAdminRole string `json:"default_channel_admin_role"` + DefaultChannelUserRole string `json:"default_channel_user_role"` + DefaultTeamGuestRole string `json:"default_team_guest_role"` + DefaultChannelGuestRole string `json:"default_channel_guest_role"` + DefaultPlaybookAdminRole string `json:"default_playbook_admin_role"` + DefaultPlaybookMemberRole string `json:"default_playbook_member_role"` + DefaultRunAdminRole string `json:"default_run_admin_role"` + DefaultRunMemberRole string `json:"default_run_member_role"` +} + +type SchemePatch struct { + Name *string `json:"name"` + DisplayName *string `json:"display_name"` + Description *string `json:"description"` +} + +type SchemeIDPatch struct { + SchemeID *string `json:"scheme_id"` +} + +// SchemeConveyor is used for importing and exporting a Scheme and its associated Roles. +type SchemeConveyor struct { + Name string `json:"name"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + Scope string `json:"scope"` + TeamAdmin string `json:"default_team_admin_role"` + TeamUser string `json:"default_team_user_role"` + TeamGuest string `json:"default_team_guest_role"` + ChannelAdmin string `json:"default_channel_admin_role"` + ChannelUser string `json:"default_channel_user_role"` + ChannelGuest string `json:"default_channel_guest_role"` + PlaybookAdmin string `json:"default_playbook_admin_role"` + PlaybookMember string `json:"default_playbook_member_role"` + RunAdmin string `json:"default_run_admin_role"` + RunMember string `json:"default_run_member_role"` + Roles []*Role `json:"roles"` +} + +func (sc *SchemeConveyor) Scheme() *Scheme { + return &Scheme{ + DisplayName: sc.DisplayName, + Name: sc.Name, + Description: sc.Description, + Scope: sc.Scope, + DefaultTeamAdminRole: sc.TeamAdmin, + DefaultTeamUserRole: sc.TeamUser, + DefaultTeamGuestRole: sc.TeamGuest, + DefaultChannelAdminRole: sc.ChannelAdmin, + DefaultChannelUserRole: sc.ChannelUser, + DefaultChannelGuestRole: sc.ChannelGuest, + DefaultPlaybookAdminRole: sc.PlaybookAdmin, + DefaultPlaybookMemberRole: sc.PlaybookMember, + DefaultRunAdminRole: sc.RunAdmin, + DefaultRunMemberRole: sc.RunMember, + } +} + +type SchemeRoles struct { + SchemeAdmin bool `json:"scheme_admin"` + SchemeUser bool `json:"scheme_user"` + SchemeGuest bool `json:"scheme_guest"` +} + +func (scheme *Scheme) IsValid() bool { + if !IsValidId(scheme.Id) { + return false + } + + return scheme.IsValidForCreate() +} + +func (scheme *Scheme) IsValidForCreate() bool { + if scheme.DisplayName == "" || len(scheme.DisplayName) > SchemeDisplayNameMaxLength { + return false + } + + if !IsValidSchemeName(scheme.Name) { + return false + } + + if len(scheme.Description) > SchemeDescriptionMaxLength { + return false + } + + switch scheme.Scope { + case SchemeScopeTeam, SchemeScopeChannel, 
SchemeScopePlaybook, SchemeScopeRun: + default: + return false + } + + if !IsValidRoleName(scheme.DefaultChannelAdminRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultChannelUserRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultChannelGuestRole) { + return false + } + + if scheme.Scope == SchemeScopeTeam { + if !IsValidRoleName(scheme.DefaultTeamAdminRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultTeamUserRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultTeamGuestRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultPlaybookAdminRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultPlaybookMemberRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultRunAdminRole) { + return false + } + + if !IsValidRoleName(scheme.DefaultRunMemberRole) { + return false + } + } + + if scheme.Scope == SchemeScopeChannel { + if scheme.DefaultTeamAdminRole != "" { + return false + } + + if scheme.DefaultTeamUserRole != "" { + return false + } + + if scheme.DefaultTeamGuestRole != "" { + return false + } + } + + return true +} + +func (scheme *Scheme) Patch(patch *SchemePatch) { + if patch.DisplayName != nil { + scheme.DisplayName = *patch.DisplayName + } + if patch.Name != nil { + scheme.Name = *patch.Name + } + if patch.Description != nil { + scheme.Description = *patch.Description + } +} + +func IsValidSchemeName(name string) bool { + re := regexp.MustCompile(fmt.Sprintf("^[a-z0-9_]{2,%d}$", SchemeNameMaxLength)) + return re.MatchString(name) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/v6/model/search_params.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/search_params.go index e6dce73c..41a2db2a 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/search_params.go @@ -4,6 +4,7 @@ package model import ( + "net/http" "regexp" "strings" "time" @@ -24,6 +25,8 @@ type SearchParams struct { ExcludedAfterDate string BeforeDate string ExcludedBeforeDate string + Extensions []string + ExcludedExtensions []string OnDate string ExcludedDate string OrTerms bool @@ -105,7 +108,7 @@ func (p *SearchParams) GetExcludedDateMillis() (int64, int64) { return GetStartOfDayMillis(date, p.TimeZoneOffset), GetEndOfDayMillis(date, p.TimeZoneOffset) } -var searchFlags = [...]string{"from", "channel", "in", "before", "after", "on"} +var searchFlags = [...]string{"from", "channel", "in", "before", "after", "on", "ext"} type flag struct { name string @@ -213,7 +216,7 @@ func parseSearchFlags(input []string) ([]searchWord, []flag) { // and remove extra pound #s word = hashtagStart.ReplaceAllString(word, "#") - if len(word) != 0 { + if word != "" { words = append(words, searchWord{ word, exclude, @@ -264,6 +267,8 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { excludedBeforeDate := "" onDate := "" excludedDate := "" + excludedExtensions := []string{} + extensions := []string{} for _, flag := range flags { if flag.name == "in" || flag.name == "channel" { @@ -296,12 +301,18 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { } else { onDate = flag.value } + } else if flag.name == "ext" { + if flag.exclude { + excludedExtensions = append(excludedExtensions, flag.value) + } else { + 
extensions = append(extensions, flag.value) + } } } paramsList := []*SearchParams{} - if len(plainTerms) > 0 || len(excludedPlainTerms) > 0 { + if plainTerms != "" || excludedPlainTerms != "" { paramsList = append(paramsList, &SearchParams{ Terms: plainTerms, ExcludedTerms: excludedPlainTerms, @@ -314,13 +325,15 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { ExcludedAfterDate: excludedAfterDate, BeforeDate: beforeDate, ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, OnDate: onDate, ExcludedDate: excludedDate, TimeZoneOffset: timeZoneOffset, }) } - if len(hashtagTerms) > 0 || len(excludedHashtagTerms) > 0 { + if hashtagTerms != "" || excludedHashtagTerms != "" { paramsList = append(paramsList, &SearchParams{ Terms: hashtagTerms, ExcludedTerms: excludedHashtagTerms, @@ -333,6 +346,8 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { ExcludedAfterDate: excludedAfterDate, BeforeDate: beforeDate, ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, OnDate: onDate, ExcludedDate: excludedDate, TimeZoneOffset: timeZoneOffset, @@ -340,13 +355,14 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { } // special case for when no terms are specified but we still have a filter - if len(plainTerms) == 0 && len(hashtagTerms) == 0 && - len(excludedPlainTerms) == 0 && len(excludedHashtagTerms) == 0 && + if plainTerms == "" && hashtagTerms == "" && + excludedPlainTerms == "" && excludedHashtagTerms == "" && (len(inChannels) != 0 || len(fromUsers) != 0 || len(excludedChannels) != 0 || len(excludedUsers) != 0 || - len(afterDate) != 0 || len(excludedAfterDate) != 0 || - len(beforeDate) != 0 || len(excludedBeforeDate) != 0 || - len(onDate) != 0 || len(excludedDate) != 0) { + len(extensions) != 0 || len(excludedExtensions) != 0 || + afterDate != "" || excludedAfterDate != "" || + beforeDate != "" || excludedBeforeDate != "" || + onDate != "" || excludedDate != "") { paramsList = append(paramsList, &SearchParams{ Terms: "", ExcludedTerms: "", @@ -359,6 +375,8 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { ExcludedAfterDate: excludedAfterDate, BeforeDate: beforeDate, ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, OnDate: onDate, ExcludedDate: excludedDate, TimeZoneOffset: timeZoneOffset, @@ -367,3 +385,13 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { return paramsList } + +func IsSearchParamsListValid(paramsList []*SearchParams) *AppError { + // All SearchParams should have same IncludeDeletedChannels value. + for _, params := range paramsList { + if params.IncludeDeletedChannels != paramsList[0].IncludeDeletedChannels { + return NewAppError("IsSearchParamsListValid", "model.search_params_list.is_valid.include_deleted_channels.app_error", nil, "", http.StatusInternalServerError) + } + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/security_bulletin.go b/vendor/github.com/mattermost/mattermost-server/v6/model/security_bulletin.go new file mode 100644 index 00000000..fa5662cf --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/security_bulletin.go @@ -0,0 +1,11 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
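To make the new "ext:" flag concrete, a hedged sketch of how a search string should parse under the changes above (the exclusion behavior of the "-" prefix is inferred from the existing flag parser, and the output shape from the struct fields):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// "ext:" populates Extensions; a "-" prefix populates ExcludedExtensions.
	for _, p := range model.ParseSearchParams("report ext:pdf -ext:png", 0) {
		fmt.Printf("terms=%q ext=%v excluded=%v\n",
			p.Terms, p.Extensions, p.ExcludedExtensions)
	}
}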
+ +package model + +type SecurityBulletin struct { + Id string `json:"id"` + AppliesToVersion string `json:"applies_to_version"` +} + +type SecurityBulletins []SecurityBulletin diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/session.go b/vendor/github.com/mattermost/mattermost-server/v6/model/session.go new file mode 100644 index 00000000..fcf8eea1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/session.go @@ -0,0 +1,230 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "net/http" + "strconv" + "strings" + + "github.com/mattermost/mattermost-server/v6/shared/mlog" +) + +const ( + SessionCookieToken = "MMAUTHTOKEN" + SessionCookieUser = "MMUSERID" + SessionCookieCsrf = "MMCSRF" + SessionCookieCloudUrl = "MMCLOUDURL" + SessionCacheSize = 35000 + SessionPropPlatform = "platform" + SessionPropOs = "os" + SessionPropBrowser = "browser" + SessionPropType = "type" + SessionPropUserAccessTokenId = "user_access_token_id" + SessionPropIsBot = "is_bot" + SessionPropIsBotValue = "true" + SessionPropOAuthAppID = "oauth_app_id" + SessionPropMattermostAppID = "mattermost_app_id" + SessionTypeUserAccessToken = "UserAccessToken" + SessionTypeCloudKey = "CloudKey" + SessionTypeRemoteclusterToken = "RemoteClusterToken" + SessionPropIsGuest = "is_guest" + SessionActivityTimeout = 1000 * 60 * 5 // 5 minutes + SessionUserAccessTokenExpiry = 100 * 365 // 100 years +) + +//msgp StringMap +type StringMap map[string]string + +//msgp:tuple Session + +// Session contains the user session details. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. +type Session struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + ExpiresAt int64 `json:"expires_at"` + LastActivityAt int64 `json:"last_activity_at"` + UserId string `json:"user_id"` + DeviceId string `json:"device_id"` + Roles string `json:"roles"` + IsOAuth bool `json:"is_oauth"` + ExpiredNotify bool `json:"expired_notify"` + Props StringMap `json:"props"` + TeamMembers []*TeamMember `json:"team_members" db:"-"` + Local bool `json:"local" db:"-"` +} + +// Returns true if the session is unrestricted, which should grant it +// with all permissions. 
This is used for local mode sessions.
+func (s *Session) IsUnrestricted() bool {
+	return s.Local
+}
+
+func (s *Session) DeepCopy() *Session {
+	copySession := *s
+
+	if s.Props != nil {
+		copySession.Props = CopyStringMap(s.Props)
+	}
+
+	if s.TeamMembers != nil {
+		copySession.TeamMembers = make([]*TeamMember, len(s.TeamMembers))
+		for index, tm := range s.TeamMembers {
+			copySession.TeamMembers[index] = new(TeamMember)
+			*copySession.TeamMembers[index] = *tm
+		}
+	}
+
+	return &copySession
+}
+
+func (s *Session) IsValid() *AppError {
+	if !IsValidId(s.Id) {
+		return NewAppError("Session.IsValid", "model.session.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if !IsValidId(s.UserId) {
+		return NewAppError("Session.IsValid", "model.session.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if s.CreateAt == 0 {
+		return NewAppError("Session.IsValid", "model.session.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+	}
+
+	if len(s.Roles) > UserRolesMaxLength {
+		return NewAppError("Session.IsValid", "model.session.is_valid.roles_limit.app_error",
+			map[string]interface{}{"Limit": UserRolesMaxLength}, "session_id="+s.Id, http.StatusBadRequest)
+	}
+
+	return nil
+}
+
+func (s *Session) PreSave() {
+	if s.Id == "" {
+		s.Id = NewId()
+	}
+
+	if s.Token == "" {
+		s.Token = NewId()
+	}
+
+	s.CreateAt = GetMillis()
+	s.LastActivityAt = s.CreateAt
+
+	if s.Props == nil {
+		s.Props = make(map[string]string)
+	}
+}
+
+func (s *Session) Sanitize() {
+	s.Token = ""
+}
+
+func (s *Session) IsExpired() bool {
+	if s.ExpiresAt <= 0 {
+		return false
+	}
+
+	if GetMillis() > s.ExpiresAt {
+		return true
+	}
+
+	return false
+}
+
+func (s *Session) AddProp(key string, value string) {
+	if s.Props == nil {
+		s.Props = make(map[string]string)
+	}
+
+	s.Props[key] = value
+}
+
+func (s *Session) GetTeamByTeamId(teamId string) *TeamMember {
+	for _, team := range s.TeamMembers {
+		if team.TeamId == teamId {
+			return team
+		}
+	}
+
+	return nil
+}
+
+func (s *Session) IsMobileApp() bool {
+	return s.DeviceId != "" || s.IsMobile()
+}
+
+func (s *Session) IsMobile() bool {
+	val, ok := s.Props[UserAuthServiceIsMobile]
+	if !ok {
+		return false
+	}
+	isMobile, err := strconv.ParseBool(val)
+	if err != nil {
+		mlog.Debug("Error parsing boolean property from Session", mlog.Err(err))
+		return false
+	}
+	return isMobile
+}
+
+func (s *Session) IsSaml() bool {
+	val, ok := s.Props[UserAuthServiceIsSaml]
+	if !ok {
+		return false
+	}
+	isSaml, err := strconv.ParseBool(val)
+	if err != nil {
+		mlog.Debug("Error parsing boolean property from Session", mlog.Err(err))
+		return false
+	}
+	return isSaml
+}
+
+func (s *Session) IsOAuthUser() bool {
+	val, ok := s.Props[UserAuthServiceIsOAuth]
+	if !ok {
+		return false
+	}
+	isOAuthUser, err := strconv.ParseBool(val)
+	if err != nil {
+		mlog.Debug("Error parsing boolean property from Session", mlog.Err(err))
+		return false
+	}
+	return isOAuthUser
+}
+
+func (s *Session) IsSSOLogin() bool {
+	return s.IsOAuthUser() || s.IsSaml()
+}
+
+func (s *Session) GetUserRoles() []string {
+	return strings.Fields(s.Roles)
+}
+
+func (s *Session) GenerateCSRF() string {
+	token := NewId()
+	s.AddProp("csrf", token)
+	return token
+}
+
+func (s *Session) GetCSRF() string {
+	if s.Props == nil {
+		return ""
+	}
+
+	return s.Props["csrf"]
+}
+
+func (s *Session) CreateAt_() float64 {
+	return float64(s.CreateAt)
+}
+
+func (s *Session) ExpiresAt_() float64 {
+	return float64(s.ExpiresAt)
+}
diff --git
a/vendor/github.com/mattermost/mattermost-server/v6/model/session_serial_gen.go b/vendor/github.com/mattermost/mattermost-server/v6/model/session_serial_gen.go new file mode 100644 index 00000000..612bbb89 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/session_serial_gen.go @@ -0,0 +1,540 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Session) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + err = z.TeamMembers[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Session) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 13 + err = en.Append(0x9d) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = 
msgp.WrapError(err, "Id") + return + } + err = en.WriteString(z.Token) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.ExpiresAt) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + err = en.WriteString(z.DeviceId) + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.IsOAuth) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + err = en.WriteBool(z.ExpiredNotify) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + err = en.WriteMapHeader(uint32(len(z.Props))) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + for za0001, za0002 := range z.Props { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + } + err = en.WriteArrayHeader(uint32(len(z.TeamMembers))) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.TeamMembers[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + err = en.WriteBool(z.Local) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Session) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 13 + o = append(o, 0x9d) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendString(o, z.Token) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.ExpiresAt) + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendString(o, z.UserId) + o = msgp.AppendString(o, z.DeviceId) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.IsOAuth) + o = msgp.AppendBool(o, z.ExpiredNotify) + o = msgp.AppendMapHeader(o, uint32(len(z.Props))) + for za0001, za0002 := range z.Props { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + o = msgp.AppendArrayHeader(o, uint32(len(z.TeamMembers))) + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TeamMembers[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + o = msgp.AppendBool(o, z.Local) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Session) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, bts, err = 
msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + bts, err = z.TeamMembers[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Session) Msgsize() (s int) { + s = 1 + msgp.StringPrefixSize + len(z.Id) + msgp.StringPrefixSize + len(z.Token) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.UserId) + msgp.StringPrefixSize + len(z.DeviceId) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.BoolSize + msgp.MapHeaderSize + if z.Props != nil { + for za0001, za0002 := range z.Props { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.ArrayHeaderSize + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + s += msgp.NilSize + } else { + s += z.TeamMembers[za0003].Msgsize() + } + } + s += msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StringMap) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + 
delete((*z), key) + } + } + for zb0003 > 0 { + zb0003-- + var zb0001 string + var zb0002 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StringMap) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteMapHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0004, zb0005 := range z { + err = en.WriteString(zb0004) + if err != nil { + err = msgp.WrapError(err) + return + } + err = en.WriteString(zb0005) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z StringMap) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendMapHeader(o, uint32(len(z))) + for zb0004, zb0005 := range z { + o = msgp.AppendString(o, zb0004) + o = msgp.AppendString(o, zb0005) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StringMap) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + var zb0001 string + var zb0002 string + zb0003-- + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z StringMap) Msgsize() (s int) { + s = msgp.MapHeaderSize + if z != nil { + for zb0004, zb0005 := range z { + _ = zb0005 + s += msgp.StringPrefixSize + len(zb0004) + msgp.StringPrefixSize + len(zb0005) + } + } + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go b/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go new file mode 100644 index 00000000..453d18e4 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go @@ -0,0 +1,250 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "net/http" + "unicode/utf8" +) + +// SharedChannel represents a channel that can be synchronized with a remote cluster. +// If "home" is true, then the shared channel is homed locally and "SharedChannelRemote" +// table contains the remote clusters that have been invited. +// If "home" is false, then the shared channel is homed remotely, and "RemoteId" +// field points to the remote cluster connection in "RemoteClusters" table. 
+type SharedChannel struct { + ChannelId string `json:"id"` + TeamId string `json:"team_id"` + Home bool `json:"home"` + ReadOnly bool `json:"readonly"` + ShareName string `json:"name"` + ShareDisplayName string `json:"display_name"` + SharePurpose string `json:"purpose"` + ShareHeader string `json:"header"` + CreatorId string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + RemoteId string `json:"remote_id,omitempty"` // if not "home" + Type ChannelType `db:"-"` +} + +func (sc *SharedChannel) IsValid() *AppError { + if !IsValidId(sc.ChannelId) { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.id.app_error", nil, "ChannelId="+sc.ChannelId, http.StatusBadRequest) + } + + if sc.Type != ChannelTypeDirect && !IsValidId(sc.TeamId) { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.id.app_error", nil, "TeamId="+sc.TeamId, http.StatusBadRequest) + } + + if sc.CreateAt == 0 { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if sc.UpdateAt == 0 { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if utf8.RuneCountInString(sc.ShareDisplayName) > ChannelDisplayNameMaxRunes { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if !IsValidChannelIdentifier(sc.ShareName) { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if utf8.RuneCountInString(sc.ShareHeader) > ChannelHeaderMaxRunes { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if utf8.RuneCountInString(sc.SharePurpose) > ChannelPurposeMaxRunes { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if !IsValidId(sc.CreatorId) { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "CreatorId="+sc.CreatorId, http.StatusBadRequest) + } + + if !sc.Home { + if !IsValidId(sc.RemoteId) { + return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.id.app_error", nil, "RemoteId="+sc.RemoteId, http.StatusBadRequest) + } + } + return nil +} + +func (sc *SharedChannel) PreSave() { + sc.ShareName = SanitizeUnicode(sc.ShareName) + sc.ShareDisplayName = SanitizeUnicode(sc.ShareDisplayName) + + sc.CreateAt = GetMillis() + sc.UpdateAt = sc.CreateAt +} + +func (sc *SharedChannel) PreUpdate() { + sc.UpdateAt = GetMillis() + sc.ShareName = SanitizeUnicode(sc.ShareName) + sc.ShareDisplayName = SanitizeUnicode(sc.ShareDisplayName) +} + +// SharedChannelRemote represents a remote cluster that has been invited +// to a shared channel. 
+type SharedChannelRemote struct { + Id string `json:"id"` + ChannelId string `json:"channel_id"` + CreatorId string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + IsInviteAccepted bool `json:"is_invite_accepted"` + IsInviteConfirmed bool `json:"is_invite_confirmed"` + RemoteId string `json:"remote_id"` + LastPostUpdateAt int64 `json:"last_post_update_at"` + LastPostId string `json:"last_post_id"` +} + +func (sc *SharedChannelRemote) IsValid() *AppError { + if !IsValidId(sc.Id) { + return NewAppError("SharedChannelRemote.IsValid", "model.channel.is_valid.id.app_error", nil, "Id="+sc.Id, http.StatusBadRequest) + } + + if !IsValidId(sc.ChannelId) { + return NewAppError("SharedChannelRemote.IsValid", "model.channel.is_valid.id.app_error", nil, "ChannelId="+sc.ChannelId, http.StatusBadRequest) + } + + if sc.CreateAt == 0 { + return NewAppError("SharedChannelRemote.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if sc.UpdateAt == 0 { + return NewAppError("SharedChannelRemote.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest) + } + + if !IsValidId(sc.CreatorId) { + return NewAppError("SharedChannelRemote.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "id="+sc.CreatorId, http.StatusBadRequest) + } + return nil +} + +func (sc *SharedChannelRemote) PreSave() { + if sc.Id == "" { + sc.Id = NewId() + } + sc.CreateAt = GetMillis() + sc.UpdateAt = sc.CreateAt +} + +func (sc *SharedChannelRemote) PreUpdate() { + sc.UpdateAt = GetMillis() +} + +type SharedChannelRemoteStatus struct { + ChannelId string `json:"channel_id"` + DisplayName string `json:"display_name"` + SiteURL string `json:"site_url"` + LastPingAt int64 `json:"last_ping_at"` + NextSyncAt int64 `json:"next_sync_at"` + ReadOnly bool `json:"readonly"` + IsInviteAccepted bool `json:"is_invite_accepted"` + Token string `json:"token"` +} + +// SharedChannelUser stores a lastSyncAt timestamp on behalf of a remote cluster for +// each user that has been synchronized. 
+type SharedChannelUser struct { + Id string `json:"id"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + RemoteId string `json:"remote_id"` + CreateAt int64 `json:"create_at"` + LastSyncAt int64 `json:"last_sync_at"` +} + +func (scu *SharedChannelUser) PreSave() { + scu.Id = NewId() + scu.CreateAt = GetMillis() +} + +func (scu *SharedChannelUser) IsValid() *AppError { + if !IsValidId(scu.Id) { + return NewAppError("SharedChannelUser.IsValid", "model.channel.is_valid.id.app_error", nil, "Id="+scu.Id, http.StatusBadRequest) + } + + if !IsValidId(scu.UserId) { + return NewAppError("SharedChannelUser.IsValid", "model.channel.is_valid.id.app_error", nil, "UserId="+scu.UserId, http.StatusBadRequest) + } + + if !IsValidId(scu.ChannelId) { + return NewAppError("SharedChannelUser.IsValid", "model.channel.is_valid.id.app_error", nil, "ChannelId="+scu.ChannelId, http.StatusBadRequest) + } + + if !IsValidId(scu.RemoteId) { + return NewAppError("SharedChannelUser.IsValid", "model.channel.is_valid.id.app_error", nil, "RemoteId="+scu.RemoteId, http.StatusBadRequest) + } + + if scu.CreateAt == 0 { + return NewAppError("SharedChannelUser.IsValid", "model.channel.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + return nil +} + +type GetUsersForSyncFilter struct { + CheckProfileImage bool + ChannelID string + Limit uint64 +} + +// SharedChannelAttachment stores a lastSyncAt timestamp on behalf of a remote cluster for +// each file attachment that has been synchronized. +type SharedChannelAttachment struct { + Id string `json:"id"` + FileId string `json:"file_id"` + RemoteId string `json:"remote_id"` + CreateAt int64 `json:"create_at"` + LastSyncAt int64 `json:"last_sync_at"` +} + +func (scf *SharedChannelAttachment) PreSave() { + if scf.Id == "" { + scf.Id = NewId() + } + if scf.CreateAt == 0 { + scf.CreateAt = GetMillis() + scf.LastSyncAt = scf.CreateAt + } else { + scf.LastSyncAt = GetMillis() + } +} + +func (scf *SharedChannelAttachment) IsValid() *AppError { + if !IsValidId(scf.Id) { + return NewAppError("SharedChannelAttachment.IsValid", "model.channel.is_valid.id.app_error", nil, "Id="+scf.Id, http.StatusBadRequest) + } + + if !IsValidId(scf.FileId) { + return NewAppError("SharedChannelAttachment.IsValid", "model.channel.is_valid.id.app_error", nil, "FileId="+scf.FileId, http.StatusBadRequest) + } + + if !IsValidId(scf.RemoteId) { + return NewAppError("SharedChannelAttachment.IsValid", "model.channel.is_valid.id.app_error", nil, "RemoteId="+scf.RemoteId, http.StatusBadRequest) + } + + if scf.CreateAt == 0 { + return NewAppError("SharedChannelAttachment.IsValid", "model.channel.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + return nil +} + +type SharedChannelFilterOpts struct { + TeamId string + CreatorId string + MemberId string + ExcludeHome bool + ExcludeRemote bool +} + +type SharedChannelRemoteFilterOpts struct { + ChannelId string + RemoteId string + InclUnconfirmed bool +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go b/vendor/github.com/mattermost/mattermost-server/v6/model/slack_attachment.go similarity index 98% rename from vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/slack_attachment.go index a85c6be2..94ba9935 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/slack_attachment.go @@ -165,7 
+165,7 @@ func StringifySlackFieldValue(a []*SlackAttachment) []*SlackAttachment {
 // all else should be set in the post which is passed
 func ParseSlackAttachment(post *Post, attachments []*SlackAttachment) {
 	if post.Type == "" {
-		post.Type = POST_SLACK_ATTACHMENT
+		post.Type = PostTypeSlackAttachment
 	}
 
 	postAttachments := []*SlackAttachment{}
@@ -179,6 +179,9 @@ func ParseSlackAttachment(post *Post, attachments []*SlackAttachment) {
 		attachment.Pretext = ParseSlackLinksToMarkdown(attachment.Pretext)
 
 		for _, field := range attachment.Fields {
+			if field == nil {
+				continue
+			}
 			if value, ok := field.Value.(string); ok {
 				field.Value = ParseSlackLinksToMarkdown(value)
 			}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/slack_compatibility.go b/vendor/github.com/mattermost/mattermost-server/v6/model/slack_compatibility.go
similarity index 100%
rename from vendor/github.com/mattermost/mattermost-server/v5/model/slack_compatibility.go
rename to vendor/github.com/mattermost/mattermost-server/v6/model/slack_compatibility.go
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/status.go b/vendor/github.com/mattermost/mattermost-server/v6/model/status.go
new file mode 100644
index 00000000..5a5e9425
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/status.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+	"encoding/json"
+)
+
+const (
+	StatusOutOfOffice    = "ooo"
+	StatusOffline        = "offline"
+	StatusAway           = "away"
+	StatusDnd            = "dnd"
+	StatusOnline         = "online"
+	StatusCacheSize      = SessionCacheSize
+	StatusChannelTimeout = 20000  // 20 seconds
+	StatusMinUpdateTime  = 120000 // 2 minutes
+)
+
+type Status struct {
+	UserId         string `json:"user_id"`
+	Status         string `json:"status"`
+	Manual         bool   `json:"manual"`
+	LastActivityAt int64  `json:"last_activity_at"`
+	ActiveChannel  string `json:"active_channel,omitempty" db:"-"`
+	DNDEndTime     int64  `json:"dnd_end_time"`
+	PrevStatus     string `json:"-"`
+}
+
+func (s *Status) ToJSON() ([]byte, error) {
+	sCopy := *s
+	sCopy.ActiveChannel = ""
+	return json.Marshal(sCopy)
+}
+
+// The following are some GraphQL methods necessary to return the
+// data in float64 type. The spec doesn't support 64 bit integers,
+// so we have to pass the data in float64. The _ at the end is
+// a hack to keep the attribute name the same in the GraphQL schema.
+
+func (s *Status) LastActivityAt_() float64 {
+	return float64(s.LastActivityAt)
+}
+
+func (s *Status) DNDEndTime_() float64 {
+	return float64(s.DNDEndTime)
+}
+
+func StatusListToJSON(u []*Status) ([]byte, error) {
+	list := make([]Status, len(u))
+	for i, s := range u {
+		list[i] = *s
+		list[i].ActiveChannel = ""
+	}
+	return json.Marshal(list)
+}
+
+func StatusMapToInterfaceMap(statusMap map[string]*Status) map[string]interface{} {
+	interfaceMap := map[string]interface{}{}
+	for _, s := range statusMap {
+		// Omitted statuses mean offline
+		if s.Status != StatusOffline {
+			interfaceMap[s.UserId] = s.Status
+		}
+	}
+	return interfaceMap
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/suggest_command.go b/vendor/github.com/mattermost/mattermost-server/v6/model/suggest_command.go
new file mode 100644
index 00000000..7fb045fc
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/suggest_command.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
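A short sketch of the Status serialization helpers above (the user IDs are made up; note that ToJSON and StatusListToJSON both blank ActiveChannel before marshaling, so it is dropped via omitempty):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	online := &model.Status{UserId: "user1", Status: model.StatusOnline}
	offline := &model.Status{UserId: "user2", Status: model.StatusOffline}

	b, _ := online.ToJSON()
	fmt.Println(string(b)) // active_channel is omitted from the payload

	// Offline users are dropped entirely from the interface map.
	m := model.StatusMapToInterfaceMap(map[string]*model.Status{
		"user1": online,
		"user2": offline,
	})
	fmt.Println(m) // map[user1:online]
}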
+ +package model + +type SuggestCommand struct { + Suggestion string `json:"suggestion"` + Description string `json:"description"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/switch_request.go b/vendor/github.com/mattermost/mattermost-server/v6/model/switch_request.go new file mode 100644 index 00000000..70694cb1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/switch_request.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type SwitchRequest struct { + CurrentService string `json:"current_service"` + NewService string `json:"new_service"` + Email string `json:"email"` + Password string `json:"password"` + NewPassword string `json:"new_password"` + MfaCode string `json:"mfa_code"` + LdapLoginId string `json:"ldap_id"` +} + +func (o *SwitchRequest) EmailToOAuth() bool { + return o.CurrentService == UserAuthServiceEmail && + (o.NewService == UserAuthServiceSaml || + o.NewService == UserAuthServiceGitlab || + o.NewService == ServiceGoogle || + o.NewService == ServiceOffice365 || + o.NewService == ServiceOpenid) +} + +func (o *SwitchRequest) OAuthToEmail() bool { + return (o.CurrentService == UserAuthServiceSaml || + o.CurrentService == UserAuthServiceGitlab || + o.CurrentService == ServiceGoogle || + o.CurrentService == ServiceOffice365 || + o.CurrentService == ServiceOpenid) && o.NewService == UserAuthServiceEmail +} + +func (o *SwitchRequest) EmailToLdap() bool { + return o.CurrentService == UserAuthServiceEmail && o.NewService == UserAuthServiceLdap +} + +func (o *SwitchRequest) LdapToEmail() bool { + return o.CurrentService == UserAuthServiceLdap && o.NewService == UserAuthServiceEmail +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/system.go b/vendor/github.com/mattermost/mattermost-server/v6/model/system.go new file mode 100644 index 00000000..b84a8dad --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/system.go @@ -0,0 +1,178 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
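+//
+// This file holds the System name/value records persisted in the Systems
+// table, wrapper types for server key material such as
+// SystemAsymmetricSigningKey, and the warn-metric definitions used to prompt
+// administrators as an installation grows.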
+ +package model + +import ( + "math/big" +) + +const ( + SystemTelemetryId = "DiagnosticId" + SystemRanUnitTests = "RanUnitTests" + SystemLastSecurityTime = "LastSecurityTime" + SystemActiveLicenseId = "ActiveLicenseId" + SystemLastComplianceTime = "LastComplianceTime" + SystemAsymmetricSigningKeyKey = "AsymmetricSigningKey" + SystemPostActionCookieSecretKey = "PostActionCookieSecret" + SystemInstallationDateKey = "InstallationDate" + SystemFirstServerRunTimestampKey = "FirstServerRunTimestamp" + SystemClusterEncryptionKey = "ClusterEncryptionKey" + SystemUpgradedFromTeId = "UpgradedFromTE" + SystemWarnMetricNumberOfTeams5 = "warn_metric_number_of_teams_5" + SystemWarnMetricNumberOfChannels50 = "warn_metric_number_of_channels_50" + SystemWarnMetricMfa = "warn_metric_mfa" + SystemWarnMetricEmailDomain = "warn_metric_email_domain" + SystemWarnMetricNumberOfActiveUsers100 = "warn_metric_number_of_active_users_100" + SystemWarnMetricNumberOfActiveUsers200 = "warn_metric_number_of_active_users_200" + SystemWarnMetricNumberOfActiveUsers300 = "warn_metric_number_of_active_users_300" + SystemWarnMetricNumberOfActiveUsers500 = "warn_metric_number_of_active_users_500" + SystemWarnMetricNumberOfPosts2m = "warn_metric_number_of_posts_2M" + SystemWarnMetricLastRunTimestampKey = "LastWarnMetricRunTimestamp" + SystemFirstAdminVisitMarketplace = "FirstAdminVisitMarketplace" + SystemFirstAdminSetupComplete = "FirstAdminSetupComplete" + AwsMeteringReportInterval = 1 + AwsMeteringDimensionUsageHrs = "UsageHrs" +) + +const ( + WarnMetricStatusLimitReached = "true" + WarnMetricStatusRunonce = "runonce" + WarnMetricStatusAck = "ack" + WarnMetricStatusStorePrefix = "warn_metric_" + WarnMetricJobInterval = 24 * 7 + WarnMetricNumberOfActiveUsers25 = 25 + WarnMetricJobWaitTime = 1000 * 3600 * 24 * 7 // 7 days +) + +type System struct { + Name string `json:"name"` + Value string `json:"value"` +} + +type SystemPostActionCookieSecret struct { + Secret []byte `json:"key,omitempty"` +} + +type SystemAsymmetricSigningKey struct { + ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"` +} + +type SystemECDSAKey struct { + Curve string `json:"curve"` + X *big.Int `json:"x"` + Y *big.Int `json:"y"` + D *big.Int `json:"d,omitempty"` +} + +// ServerBusyState provides serialization for app.Busy. 
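+// Busy reports whether the server is in busy (high-load) mode, Expires is the
+// Unix timestamp at which that mode lapses, and ExpiresTS carries the same
+// instant pre-formatted for display.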
+type ServerBusyState struct { + Busy bool `json:"busy"` + Expires int64 `json:"expires"` + ExpiresTS string `json:"expires_ts,omitempty"` +} + +type SupportPacket struct { + ServerOS string `yaml:"server_os"` + ServerArchitecture string `yaml:"server_architecture"` + ServerVersion string `yaml:"server_version"` + BuildHash string `yaml:"build_hash,omitempty"` + DatabaseType string `yaml:"database_type"` + DatabaseVersion string `yaml:"database_version"` + LdapVendorName string `yaml:"ldap_vendor_name,omitempty"` + LdapVendorVersion string `yaml:"ldap_vendor_version,omitempty"` + ElasticServerVersion string `yaml:"elastic_server_version,omitempty"` + ElasticServerPlugins []string `yaml:"elastic_server_plugins,omitempty"` +} + +type FileData struct { + Filename string + Body []byte +} + +var WarnMetricsTable = map[string]WarnMetric{ + SystemWarnMetricMfa: { + Id: SystemWarnMetricMfa, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricEmailDomain: { + Id: SystemWarnMetricEmailDomain, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfTeams5: { + Id: SystemWarnMetricNumberOfTeams5, + Limit: 5, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfChannels50: { + Id: SystemWarnMetricNumberOfChannels50, + Limit: 50, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfActiveUsers100: { + Id: SystemWarnMetricNumberOfActiveUsers100, + Limit: 100, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfActiveUsers200: { + Id: SystemWarnMetricNumberOfActiveUsers200, + Limit: 200, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfActiveUsers300: { + Id: SystemWarnMetricNumberOfActiveUsers300, + Limit: 300, + IsBotOnly: true, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfActiveUsers500: { + Id: SystemWarnMetricNumberOfActiveUsers500, + Limit: 500, + IsBotOnly: false, + IsRunOnce: true, + }, + SystemWarnMetricNumberOfPosts2m: { + Id: SystemWarnMetricNumberOfPosts2m, + Limit: 2000000, + IsBotOnly: false, + IsRunOnce: true, + }, +} + +type WarnMetric struct { + Id string + Limit int64 + IsBotOnly bool + IsRunOnce bool + SkipAction bool +} + +type WarnMetricDisplayTexts struct { + BotTitle string + BotMessageBody string + BotSuccessMessage string + EmailBody string +} +type WarnMetricStatus struct { + Id string `json:"id"` + Limit int64 `json:"limit"` + Acked bool `json:"acked"` + StoreStatus string `json:"store_status,omitempty"` +} + +type SendWarnMetricAck struct { + ForceAck bool `json:"forceAck"` +} + +type AppliedMigration struct { + Version int `json:"version"` + Name string `json:"name"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team.go similarity index 72% rename from vendor/github.com/mattermost/mattermost-server/v5/model/team.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/team.go index 381eb8bb..92b6f7fc 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/team.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team.go @@ -4,9 +4,7 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" "regexp" "strings" @@ -14,15 +12,15 @@ import ( ) const ( - TEAM_OPEN = "O" - TEAM_INVITE = "I" - TEAM_ALLOWED_DOMAINS_MAX_LENGTH = 500 - TEAM_COMPANY_NAME_MAX_LENGTH = 64 - TEAM_DESCRIPTION_MAX_LENGTH = 255 - TEAM_DISPLAY_NAME_MAX_RUNES = 64 - TEAM_EMAIL_MAX_LENGTH = 128 - TEAM_NAME_MAX_LENGTH = 64 - TEAM_NAME_MIN_LENGTH = 2 + 
TeamOpen = "O" + TeamInvite = "I" + TeamAllowedDomainsMaxLength = 500 + TeamCompanyNameMaxLength = 64 + TeamDescriptionMaxLength = 255 + TeamDisplayNameMaxRunes = 64 + TeamEmailMaxLength = 128 + TeamNameMaxLength = 64 + TeamNameMinLength = 2 ) type Team struct { @@ -42,6 +40,7 @@ type Team struct { LastTeamIconUpdate int64 `json:"last_team_icon_update,omitempty"` SchemeId *string `json:"scheme_id"` GroupConstrained *bool `json:"group_constrained"` + PolicyID *string `json:"policy_id"` } type TeamPatch struct { @@ -67,12 +66,6 @@ type TeamsWithCount struct { TotalCount int64 `json:"total_count"` } -func InvitesFromJson(data io.Reader) *Invites { - var o *Invites - json.NewDecoder(data).Decode(&o) - return o -} - func (o *Invites) ToEmailList() []string { emailList := make([]string, len(o.Invites)) for _, invite := range o.Invites { @@ -81,61 +74,11 @@ func (o *Invites) ToEmailList() []string { return emailList } -func (o *Invites) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func (o *Team) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func TeamFromJson(data io.Reader) *Team { - var o *Team - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamMapToJson(u map[string]*Team) string { - b, _ := json.Marshal(u) - return string(b) -} - -func TeamMapFromJson(data io.Reader) map[string]*Team { - var teams map[string]*Team - json.NewDecoder(data).Decode(&teams) - return teams -} - -func TeamListToJson(t []*Team) string { - b, _ := json.Marshal(t) - return string(b) -} - -func TeamsWithCountToJson(tlc *TeamsWithCount) []byte { - b, _ := json.Marshal(tlc) - return b -} - -func TeamsWithCountFromJson(data io.Reader) *TeamsWithCount { - var twc *TeamsWithCount - json.NewDecoder(data).Decode(&twc) - return twc -} - -func TeamListFromJson(data io.Reader) []*Team { - var teams []*Team - json.NewDecoder(data).Decode(&teams) - return teams -} - func (o *Team) Etag() string { return Etag(o.Id, o.UpdateAt) } func (o *Team) IsValid() *AppError { - if !IsValidId(o.Id) { return NewAppError("Team.IsValid", "model.team.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -148,27 +91,27 @@ func (o *Team) IsValid() *AppError { return NewAppError("Team.IsValid", "model.team.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.Email) > TEAM_EMAIL_MAX_LENGTH { + if len(o.Email) > TeamEmailMaxLength { return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.Email) > 0 && !IsValidEmail(o.Email) { + if o.Email != "" && !IsValidEmail(o.Email) { return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if utf8.RuneCountInString(o.DisplayName) == 0 || utf8.RuneCountInString(o.DisplayName) > TEAM_DISPLAY_NAME_MAX_RUNES { + if utf8.RuneCountInString(o.DisplayName) == 0 || utf8.RuneCountInString(o.DisplayName) > TeamDisplayNameMaxRunes { return NewAppError("Team.IsValid", "model.team.is_valid.name.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.Name) > TEAM_NAME_MAX_LENGTH { + if len(o.Name) > TeamNameMaxLength { return NewAppError("Team.IsValid", "model.team.is_valid.url.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.Description) > TEAM_DESCRIPTION_MAX_LENGTH { + if len(o.Description) > TeamDescriptionMaxLength { return NewAppError("Team.IsValid", "model.team.is_valid.description.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.InviteId) == 0 { + if 
o.InviteId == "" { return NewAppError("Team.IsValid", "model.team.is_valid.invite_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) } @@ -180,15 +123,15 @@ func (o *Team) IsValid() *AppError { return NewAppError("Team.IsValid", "model.team.is_valid.characters.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if !(o.Type == TEAM_OPEN || o.Type == TEAM_INVITE) { + if !(o.Type == TeamOpen || o.Type == TeamInvite) { return NewAppError("Team.IsValid", "model.team.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.CompanyName) > TEAM_COMPANY_NAME_MAX_LENGTH { + if len(o.CompanyName) > TeamCompanyNameMaxLength { return NewAppError("Team.IsValid", "model.team.is_valid.company.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.AllowedDomains) > TEAM_ALLOWED_DOMAINS_MAX_LENGTH { + if len(o.AllowedDomains) > TeamAllowedDomainsMaxLength { return NewAppError("Team.IsValid", "model.team.is_valid.domains.app_error", nil, "id="+o.Id, http.StatusBadRequest) } @@ -208,7 +151,7 @@ func (o *Team) PreSave() { o.Description = SanitizeUnicode(o.Description) o.CompanyName = SanitizeUnicode(o.CompanyName) - if len(o.InviteId) == 0 { + if o.InviteId == "" { o.InviteId = NewId() } } @@ -234,11 +177,11 @@ func IsReservedTeamName(s string) bool { } func IsValidTeamName(s string) bool { - if !IsValidAlphaNum(s) { + if !isValidAlphaNum(s) { return false } - if len(s) < TEAM_NAME_MIN_LENGTH { + if len(s) < TeamNameMinLength { return false } @@ -309,22 +252,21 @@ func (o *Team) IsGroupConstrained() bool { return o.GroupConstrained != nil && *o.GroupConstrained } -func (t *TeamPatch) ToJson() string { - b, err := json.Marshal(t) - if err != nil { - return "" - } - - return string(b) +// ShallowCopy returns a shallow copy of team. +func (o *Team) ShallowCopy() *Team { + c := *o + return &c } -func TeamPatchFromJson(data io.Reader) *TeamPatch { - decoder := json.NewDecoder(data) - var team TeamPatch - err := decoder.Decode(&team) - if err != nil { - return nil - } +// The following are some GraphQL methods necessary to return the +// data in float64 type. The spec doesn't support 64 bit integers, +// so we have to pass the data in float64. The _ at the end is +// a hack to keep the attribute name same in GraphQL schema. + +func (o *Team) UpdateAt_() float64 { + return float64(o.UpdateAt) +} - return &team +func (o *Team) LastTeamIconUpdate_() float64 { + return float64(o.LastTeamIconUpdate) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go similarity index 57% rename from vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go index b747f17c..45a22803 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go @@ -4,9 +4,7 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" "strings" ) @@ -15,6 +13,9 @@ const ( USERNAME = "Username" ) +//msgp:tuple TeamMember +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. 
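+// The msgp tuple format is positional: the generated code in
+// team_member_serial_gen.go reads and writes exactly these fields in this
+// order, so reordering a field is as breaking as adding or removing one.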
type TeamMember struct { TeamId string `json:"team_id"` UserId string `json:"user_id"` @@ -26,28 +27,37 @@ type TeamMember struct { ExplicitRoles string `json:"explicit_roles"` } +//msgp:ignore TeamUnread type TeamUnread struct { - TeamId string `json:"team_id"` - MsgCount int64 `json:"msg_count"` - MentionCount int64 `json:"mention_count"` + TeamId string `json:"team_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + MentionCountRoot int64 `json:"mention_count_root"` + MsgCountRoot int64 `json:"msg_count_root"` + ThreadCount int64 `json:"thread_count"` + ThreadMentionCount int64 `json:"thread_mention_count"` } +//msgp:ignore TeamMemberForExport type TeamMemberForExport struct { TeamMember TeamName string } +//msgp:ignore TeamMemberWithError type TeamMemberWithError struct { UserId string `json:"user_id"` Member *TeamMember `json:"member"` Error *AppError `json:"error"` } +//msgp:ignore EmailInviteWithError type EmailInviteWithError struct { Email string `json:"email"` Error *AppError `json:"error"` } +//msgp:ignore TeamMembersGetOptions type TeamMembersGetOptions struct { // Sort the team members. Accepts "Username", but defaults to "Id". Sort string @@ -59,32 +69,9 @@ type TeamMembersGetOptions struct { ViewRestrictions *ViewUsersRestrictions } -func (o *TeamMember) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func (o *TeamUnread) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func TeamMemberFromJson(data io.Reader) *TeamMember { - var o *TeamMember - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamUnreadFromJson(data io.Reader) *TeamUnread { - var o *TeamUnread - json.NewDecoder(data).Decode(&o) - return o -} - -func EmailInviteWithErrorFromJson(data io.Reader) []*EmailInviteWithError { - var o []*EmailInviteWithError - json.NewDecoder(data).Decode(&o) - return o +//msgp:ignore TeamInviteReminderData +type TeamInviteReminderData struct { + Interval string } func EmailInviteWithErrorToEmails(o []*EmailInviteWithError) []string { @@ -97,14 +84,6 @@ func EmailInviteWithErrorToEmails(o []*EmailInviteWithError) []string { return ret } -func EmailInviteWithErrorToJson(o []*EmailInviteWithError) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - func EmailInviteWithErrorToString(o *EmailInviteWithError) string { return fmt.Sprintf("%s:%s", o.Email, o.Error.Error()) } @@ -119,54 +98,11 @@ func TeamMembersWithErrorToTeamMembers(o []*TeamMemberWithError) []*TeamMember { return ret } -func TeamMembersWithErrorToJson(o []*TeamMemberWithError) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - func TeamMemberWithErrorToString(o *TeamMemberWithError) string { return fmt.Sprintf("%s:%s", o.UserId, o.Error.Error()) } -func TeamMembersWithErrorFromJson(data io.Reader) []*TeamMemberWithError { - var o []*TeamMemberWithError - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamMembersToJson(o []*TeamMember) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func TeamMembersFromJson(data io.Reader) []*TeamMember { - var o []*TeamMember - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamsUnreadToJson(o []*TeamUnread) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func TeamsUnreadFromJson(data io.Reader) []*TeamUnread { - var o []*TeamUnread - 
json.NewDecoder(data).Decode(&o) - return o -} - func (o *TeamMember) IsValid() *AppError { - if !IsValidId(o.TeamId) { return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } @@ -175,6 +111,11 @@ func (o *TeamMember) IsValid() *AppError { return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } + if len(o.Roles) > UserRolesMaxLength { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest) + } + return nil } @@ -184,3 +125,9 @@ func (o *TeamMember) PreUpdate() { func (o *TeamMember) GetRoles() []string { return strings.Fields(o.Roles) } + +// DeleteAt_ returns the deleteAt value in float64. This is necessary to work +// with GraphQL since it doesn't support 64 bit integers. +func (o *TeamMember) DeleteAt_() float64 { + return float64(o.DeleteAt) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/team_member_serial_gen.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member_serial_gen.go new file mode 100644 index 00000000..044a608a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member_serial_gen.go @@ -0,0 +1,193 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *TeamMember) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 8 { + err = msgp.ArrayError{Wanted: 8, Got: zb0001} + return + } + z.TeamId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.SchemeGuest, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + z.SchemeUser, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + z.SchemeAdmin, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + z.ExplicitRoles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *TeamMember) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 8 + err = en.Append(0x98) + if err != nil { + return + } + err = en.WriteString(z.TeamId) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + err = en.WriteBool(z.SchemeGuest) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + err = en.WriteBool(z.SchemeUser) + if err != nil { + err = 
msgp.WrapError(err, "SchemeUser") + return + } + err = en.WriteBool(z.SchemeAdmin) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + err = en.WriteString(z.ExplicitRoles) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TeamMember) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 8 + o = append(o, 0x98) + o = msgp.AppendString(o, z.TeamId) + o = msgp.AppendString(o, z.UserId) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendInt64(o, z.DeleteAt) + o = msgp.AppendBool(o, z.SchemeGuest) + o = msgp.AppendBool(o, z.SchemeUser) + o = msgp.AppendBool(o, z.SchemeAdmin) + o = msgp.AppendString(o, z.ExplicitRoles) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TeamMember) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 8 { + err = msgp.ArrayError{Wanted: 8, Got: zb0001} + return + } + z.TeamId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + z.UserId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.SchemeGuest, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + z.SchemeUser, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + z.SchemeAdmin, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + z.ExplicitRoles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TeamMember) Msgsize() (s int) { + s = 1 + msgp.StringPrefixSize + len(z.TeamId) + msgp.StringPrefixSize + len(z.UserId) + msgp.StringPrefixSize + len(z.Roles) + msgp.Int64Size + msgp.BoolSize + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.ExplicitRoles) + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_search.go new file mode 100644 index 00000000..c4a39275 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_search.go @@ -0,0 +1,22 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
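+//
+// TeamSearch bundles the search term with optional pagination and
+// policy/group filters; the fields tagged json:"-" are set server-side rather
+// than parsed from a request body.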
+ +package model + +type TeamSearch struct { + Term string `json:"term"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` + AllowOpenInvite *bool `json:"allow_open_invite,omitempty"` + GroupConstrained *bool `json:"group_constrained,omitempty"` + IncludeGroupConstrained *bool `json:"include_group_constrained,omitempty"` + PolicyID *string `json:"policy_id,omitempty"` + ExcludePolicyConstrained *bool `json:"exclude_policy_constrained,omitempty"` + IncludePolicyID *bool `json:"-"` + IncludeDeleted *bool `json:"-"` + TeamType *string `json:"-"` +} + +func (t *TeamSearch) IsPaginated() bool { + return t.Page != nil && t.PerPage != nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_stats.go similarity index 56% rename from vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/team_stats.go index 9209a0cf..0a3a7387 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_stats.go @@ -3,24 +3,8 @@ package model -import ( - "encoding/json" - "io" -) - type TeamStats struct { TeamId string `json:"team_id"` TotalMemberCount int64 `json:"total_member_count"` ActiveMemberCount int64 `json:"active_member_count"` } - -func (o *TeamStats) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func TeamStatsFromJson(data io.Reader) *TeamStats { - var o *TeamStats - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go b/vendor/github.com/mattermost/mattermost-server/v6/model/terms_of_service.go similarity index 70% rename from vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/terms_of_service.go index 8ce5d350..b6e531a2 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/terms_of_service.go @@ -4,15 +4,11 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" "unicode/utf8" ) -const TERMS_OF_SERVICE_CACHE_SIZE = 1 - type TermsOfService struct { Id string `json:"id"` CreateAt int64 `json:"create_at"` @@ -33,31 +29,20 @@ func (t *TermsOfService) IsValid() *AppError { return InvalidTermsOfServiceError("user_id", t.Id) } - if utf8.RuneCountInString(t.Text) > POST_MESSAGE_MAX_RUNES_V2 { + if utf8.RuneCountInString(t.Text) > PostMessageMaxRunesV2 { return InvalidTermsOfServiceError("text", t.Id) } return nil } -func (t *TermsOfService) ToJson() string { - b, _ := json.Marshal(t) - return string(b) -} - -func TermsOfServiceFromJson(data io.Reader) *TermsOfService { - var termsOfService *TermsOfService - json.NewDecoder(data).Decode(&termsOfService) - return termsOfService -} - func InvalidTermsOfServiceError(fieldName string, termsOfServiceId string) *AppError { id := fmt.Sprintf("model.terms_of_service.is_valid.%s.app_error", fieldName) details := "" if termsOfServiceId != "" { details = "terms_of_service_id=" + termsOfServiceId } - return NewAppError("TermsOfService.IsValid", id, map[string]interface{}{"MaxLength": POST_MESSAGE_MAX_RUNES_V2}, details, http.StatusBadRequest) + return NewAppError("TermsOfService.IsValid", id, map[string]interface{}{"MaxLength": PostMessageMaxRunesV2}, details, http.StatusBadRequest) 
} func (t *TermsOfService) PreSave() { diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go b/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go new file mode 100644 index 00000000..17fffddd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go @@ -0,0 +1,114 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Thread tracks the metadata associated with a root post and its reply posts. +// +// Note that Thread metadata does not exist until the first reply to a root post. +type Thread struct { + // PostId is the root post of the thread. + PostId string `json:"id"` + + // ChannelId is the channel in which the thread was posted. + ChannelId string `json:"channel_id"` + + // ReplyCount is the number of replies to the thread (excluding deleted posts). + ReplyCount int64 `json:"reply_count"` + + // LastReplyAt is the timestamp of the most recent post to the thread. + LastReplyAt int64 `json:"last_reply_at"` + + // Participants is a list of user ids that have replied to the thread, sorted by the oldest + // to newest. Note that the root post author is not included in this list until they reply. + Participants StringArray `json:"participants"` +} + +type ThreadResponse struct { + PostId string `json:"id"` + ReplyCount int64 `json:"reply_count"` + LastReplyAt int64 `json:"last_reply_at"` + LastViewedAt int64 `json:"last_viewed_at"` + Participants []*User `json:"participants"` + Post *Post `json:"post"` + UnreadReplies int64 `json:"unread_replies"` + UnreadMentions int64 `json:"unread_mentions"` +} + +type Threads struct { + Total int64 `json:"total"` + TotalUnreadThreads int64 `json:"total_unread_threads"` + TotalUnreadMentions int64 `json:"total_unread_mentions"` + Threads []*ThreadResponse `json:"threads"` +} + +type GetUserThreadsOpts struct { + // PageSize specifies the size of the returned chunk of results. Default = 30 + PageSize uint64 + + // Extended will enrich the response with participant details. Default = false + Extended bool + + // Deleted will specify that even deleted threads should be returned (For mobile sync). Default = false + Deleted bool + + // Since filters the threads based on their LastUpdateAt timestamp. + Since uint64 + + // Before specifies thread id as a cursor for pagination and will return `PageSize` threads before the cursor + Before string + + // After specifies thread id as a cursor for pagination and will return `PageSize` threads after the cursor + After string + + // Unread will make sure that only threads with unread replies are returned + Unread bool + + // TotalsOnly will not fetch any threads and just fetch the total counts + TotalsOnly bool + + // ThreadsOnly will fetch threads but not calculate totals and will return 0 + ThreadsOnly bool + + // TeamOnly will only fetch threads and unreads for the specified team and excludes DMs/GMs + TeamOnly bool +} + +func (o *Thread) Etag() string { + return Etag(o.PostId, o.LastReplyAt) +} + +// ThreadMembership models the relationship between a user and a thread of posts, with a similar +// data structure as ChannelMembership. +type ThreadMembership struct { + // PostId is the root post id of the thread in question. + PostId string `json:"post_id"` + + // UserId is the user whose membership in the thread is being tracked. + UserId string `json:"user_id"` + + // Following tracks whether the user is following the given thread. 
This defaults to true + // when a ThreadMembership record is created (a record doesn't exist until the user first + // starts following the thread), but the user can stop following or resume following at + // will. + Following bool `json:"following"` + + // LastUpdated is either the creation time of the membership record, or the last time the + // membership record was changed (e.g. started/stopped following, viewed thread, mention + // count change). + // + // This field is used to constrain queries of thread memberships to those updated after + // a given timestamp (e.g. on websocket reconnect). It's also used as the time column for + // deletion decisions during any configured retention policy. + LastUpdated int64 `json:"last_update_at"` + + // LastViewed is the last time the user viewed this thread. It is the thread analogue to + // the ChannelMembership's LastViewedAt and is used to decide when there are new replies + // for the user and where the user should start reading. + LastViewed int64 `json:"last_view_at"` + + // UnreadMentions is the number of unseen at-mentions for the user in the given thread. It + // is the thread analogue to the ChannelMembership's MentionCount, and is used to highlight + // threads with the mention count. + UnreadMentions int64 `json:"unread_mentions"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/token.go b/vendor/github.com/mattermost/mattermost-server/v6/model/token.go similarity index 76% rename from vendor/github.com/mattermost/mattermost-server/v5/model/token.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/token.go index 0730778c..90fc729f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/token.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/token.go @@ -3,12 +3,14 @@ package model -import "net/http" +import ( + "net/http" +) const ( - TOKEN_SIZE = 64 - MAX_TOKEN_EXIPRY_TIME = 1000 * 60 * 60 * 48 // 48 hour - TOKEN_TYPE_OAUTH = "oauth" + TokenSize = 64 + MaxTokenExipryTime = 1000 * 60 * 60 * 48 // 48 hour + TokenTypeOAuth = "oauth" ) type Token struct { @@ -20,7 +22,7 @@ type Token struct { func NewToken(tokentype, extra string) *Token { return &Token{ - Token: NewRandomString(TOKEN_SIZE), + Token: NewRandomString(TokenSize), CreateAt: GetMillis(), Type: tokentype, Extra: extra, @@ -28,7 +30,7 @@ func NewToken(tokentype, extra string) *Token { } func (t *Token) IsValid() *AppError { - if len(t.Token) != TOKEN_SIZE { + if len(t.Token) != TokenSize { return NewAppError("Token.IsValid", "model.token.is_valid.size", nil, "", http.StatusInternalServerError) } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/typing_request.go b/vendor/github.com/mattermost/mattermost-server/v6/model/typing_request.go new file mode 100644 index 00000000..f7a34341 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/typing_request.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +type TypingRequest struct { + ChannelId string `json:"channel_id"` + ParentId string `json:"parent_id"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go b/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go new file mode 100644 index 00000000..0fb54ce6 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go @@ -0,0 +1,114 @@ +// Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "fmt" + "net/http" +) + +// UploadType defines the type of an upload. +type UploadType string + +const ( + UploadTypeAttachment UploadType = "attachment" + UploadTypeImport UploadType = "import" + IncompleteUploadSuffix = ".tmp" +) + +// UploadNoUserID is a "fake" user id used by the API layer when in local mode. +const UploadNoUserID = "nouser" + +// UploadSession contains information used to keep track of a file upload. +type UploadSession struct { + // The unique identifier for the session. + Id string `json:"id"` + // The type of the upload. + Type UploadType `json:"type"` + // The timestamp of creation. + CreateAt int64 `json:"create_at"` + // The id of the user performing the upload. + UserId string `json:"user_id"` + // The id of the channel to upload to. + ChannelId string `json:"channel_id,omitempty"` + // The name of the file to upload. + Filename string `json:"filename"` + // The path where the file is stored. + Path string `json:"-"` + // The size of the file to upload. + FileSize int64 `json:"file_size"` + // The amount of received data in bytes. If equal to FileSize it means the + // upload has finished. + FileOffset int64 `json:"file_offset"` + // Id of remote cluster if uploading for shared channel + RemoteId string `json:"remote_id"` + // Requested file id if uploading for shared channel + ReqFileId string `json:"req_file_id"` +} + +// PreSave is a utility function used to fill required information. +func (us *UploadSession) PreSave() { + if us.Id == "" { + us.Id = NewId() + } + + if us.CreateAt == 0 { + us.CreateAt = GetMillis() + } +} + +// IsValid validates an UploadType. It returns an error in case of +// failure. +func (t UploadType) IsValid() error { + switch t { + case UploadTypeAttachment: + return nil + case UploadTypeImport: + return nil + default: + } + return fmt.Errorf("invalid UploadType %s", t) +} + +// IsValid validates an UploadSession. It returns an error in case of +// failure. 
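+//
+// Note that IsValid only bounds-checks the offset (0 <= FileOffset <= FileSize);
+// as documented on the struct, the upload is complete once FileOffset equals
+// FileSize.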
+func (us *UploadSession) IsValid() *AppError { + if !IsValidId(us.Id) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if err := us.Type.IsValid(); err != nil { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.type.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if !IsValidId(us.UserId) && us.UserId != UploadNoUserID { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.user_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Type == UploadTypeAttachment && !IsValidId(us.ChannelId) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.channel_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.CreateAt == 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.create_at.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Filename == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.filename.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileSize <= 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_size.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileOffset < 0 || us.FileOffset > us.FileSize { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_offset.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Path == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.path.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go similarity index 59% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user.go index 629e83dc..698f3377 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go @@ -7,58 +7,67 @@ import ( "crypto/sha256" "encoding/json" "fmt" - "io" - "io/ioutil" - "math/rand" "net/http" "regexp" "sort" "strings" - "time" "unicode/utf8" - "github.com/mattermost/mattermost-server/v5/services/timezones" "golang.org/x/crypto/bcrypt" "golang.org/x/text/language" + + "github.com/mattermost/mattermost-server/v6/services/timezones" + "github.com/mattermost/mattermost-server/v6/shared/mlog" ) const ( - ME = "me" - USER_NOTIFY_ALL = "all" - USER_NOTIFY_HERE = "here" - USER_NOTIFY_MENTION = "mention" - USER_NOTIFY_NONE = "none" - DESKTOP_NOTIFY_PROP = "desktop" - DESKTOP_SOUND_NOTIFY_PROP = "desktop_sound" - MARK_UNREAD_NOTIFY_PROP = "mark_unread" - PUSH_NOTIFY_PROP = "push" - PUSH_STATUS_NOTIFY_PROP = "push_status" - EMAIL_NOTIFY_PROP = "email" - CHANNEL_MENTIONS_NOTIFY_PROP = "channel" - COMMENTS_NOTIFY_PROP = "comments" - MENTION_KEYS_NOTIFY_PROP = "mention_keys" - COMMENTS_NOTIFY_NEVER = "never" - COMMENTS_NOTIFY_ROOT = "root" - COMMENTS_NOTIFY_ANY = "any" - FIRST_NAME_NOTIFY_PROP = "first_name" - AUTO_RESPONDER_ACTIVE_NOTIFY_PROP = "auto_responder_active" - AUTO_RESPONDER_MESSAGE_NOTIFY_PROP = "auto_responder_message" - - DEFAULT_LOCALE = "en" - USER_AUTH_SERVICE_EMAIL = "email" - - USER_EMAIL_MAX_LENGTH = 128 - USER_NICKNAME_MAX_RUNES = 64 - USER_POSITION_MAX_RUNES = 128 - USER_FIRST_NAME_MAX_RUNES = 64 - 
USER_LAST_NAME_MAX_RUNES = 64 - USER_AUTH_DATA_MAX_LENGTH = 128 - USER_NAME_MAX_LENGTH = 64 - USER_NAME_MIN_LENGTH = 1 - USER_PASSWORD_MAX_LENGTH = 72 - USER_LOCALE_MAX_LENGTH = 5 + Me = "me" + UserNotifyAll = "all" + UserNotifyHere = "here" + UserNotifyMention = "mention" + UserNotifyNone = "none" + DesktopNotifyProp = "desktop" + DesktopSoundNotifyProp = "desktop_sound" + MarkUnreadNotifyProp = "mark_unread" + PushNotifyProp = "push" + PushStatusNotifyProp = "push_status" + EmailNotifyProp = "email" + ChannelMentionsNotifyProp = "channel" + CommentsNotifyProp = "comments" + MentionKeysNotifyProp = "mention_keys" + CommentsNotifyNever = "never" + CommentsNotifyRoot = "root" + CommentsNotifyAny = "any" + CommentsNotifyCRT = "crt" + FirstNameNotifyProp = "first_name" + AutoResponderActiveNotifyProp = "auto_responder_active" + AutoResponderMessageNotifyProp = "auto_responder_message" + DesktopThreadsNotifyProp = "desktop_threads" + PushThreadsNotifyProp = "push_threads" + EmailThreadsNotifyProp = "email_threads" + + DefaultLocale = "en" + UserAuthServiceEmail = "email" + + UserEmailMaxLength = 128 + UserNicknameMaxRunes = 64 + UserPositionMaxRunes = 128 + UserFirstNameMaxRunes = 64 + UserLastNameMaxRunes = 64 + UserAuthDataMaxLength = 128 + UserNameMaxLength = 64 + UserNameMinLength = 1 + UserPasswordMaxLength = 72 + UserLocaleMaxLength = 5 + UserTimezoneMaxRunes = 256 + UserRolesMaxLength = 256 ) +//msgp:tuple User + +// User contains the details about the user. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. type User struct { Id string `json:"id"` CreateAt int64 `json:"create_at,omitempty"` @@ -85,19 +94,29 @@ type User struct { Timezone StringMap `json:"timezone"` MfaActive bool `json:"mfa_active,omitempty"` MfaSecret string `json:"mfa_secret,omitempty"` - LastActivityAt int64 `db:"-" json:"last_activity_at,omitempty"` - IsBot bool `db:"-" json:"is_bot,omitempty"` - BotDescription string `db:"-" json:"bot_description,omitempty"` - BotLastIconUpdate int64 `db:"-" json:"bot_last_icon_update,omitempty"` - TermsOfServiceId string `db:"-" json:"terms_of_service_id,omitempty"` - TermsOfServiceCreateAt int64 `db:"-" json:"terms_of_service_create_at,omitempty"` + RemoteId *string `json:"remote_id,omitempty"` + LastActivityAt int64 `json:"last_activity_at,omitempty"` + IsBot bool `json:"is_bot,omitempty"` + BotDescription string `json:"bot_description,omitempty"` + BotLastIconUpdate int64 `json:"bot_last_icon_update,omitempty"` + TermsOfServiceId string `json:"terms_of_service_id,omitempty"` + TermsOfServiceCreateAt int64 `json:"terms_of_service_create_at,omitempty"` + DisableWelcomeEmail bool `json:"disable_welcome_email"` } +//msgp UserMap + +// UserMap is a map from a userId to a user object. +// It is used to generate methods which can be used for fast serialization/de-serialization. +type UserMap map[string]*User + +//msgp:ignore UserUpdate type UserUpdate struct { Old *User New *User } +//msgp:ignore UserPatch type UserPatch struct { Username *string `json:"username"` Password *string `json:"password,omitempty"` @@ -110,14 +129,17 @@ type UserPatch struct { NotifyProps StringMap `json:"notify_props,omitempty"` Locale *string `json:"locale"` Timezone StringMap `json:"timezone"` + RemoteId *string `json:"remote_id"` } +//msgp:ignore UserAuth type UserAuth struct { - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty"` // DEPRECATED: It is not used. 
AuthData *string `json:"auth_data,omitempty"` AuthService string `json:"auth_service,omitempty"` } +//msgp:ignore UserForIndexing type UserForIndexing struct { Id string `json:"id"` Username string `json:"username"` @@ -131,6 +153,7 @@ type UserForIndexing struct { ChannelsIds []string `json:"channel_id"` } +//msgp:ignore ViewUsersRestrictions type ViewUsersRestrictions struct { Teams []string Channels []string @@ -147,6 +170,7 @@ func (r *ViewUsersRestrictions) Hash() string { return fmt.Sprintf("%x", hash.Sum(nil)) } +//msgp:ignore UserSlice type UserSlice []*User func (u UserSlice) Usernames() []string { @@ -238,7 +262,6 @@ func (u *User) DeepCopy() *User { // IsValid validates the user and returns an error if it isn't configured // correctly. func (u *User) IsValid() *AppError { - if !IsValidId(u.Id) { return InvalidUserError("id", "") } @@ -251,43 +274,49 @@ func (u *User) IsValid() *AppError { return InvalidUserError("update_at", u.Id) } - if !IsValidUsername(u.Username) { - return InvalidUserError("username", u.Id) + if u.IsRemote() { + if !IsValidUsernameAllowRemote(u.Username) { + return InvalidUserError("username", u.Id) + } + } else { + if !IsValidUsername(u.Username) { + return InvalidUserError("username", u.Id) + } } - if len(u.Email) > USER_EMAIL_MAX_LENGTH || len(u.Email) == 0 || !IsValidEmail(u.Email) { + if len(u.Email) > UserEmailMaxLength || u.Email == "" || !IsValidEmail(u.Email) { return InvalidUserError("email", u.Id) } - if utf8.RuneCountInString(u.Nickname) > USER_NICKNAME_MAX_RUNES { + if utf8.RuneCountInString(u.Nickname) > UserNicknameMaxRunes { return InvalidUserError("nickname", u.Id) } - if utf8.RuneCountInString(u.Position) > USER_POSITION_MAX_RUNES { + if utf8.RuneCountInString(u.Position) > UserPositionMaxRunes { return InvalidUserError("position", u.Id) } - if utf8.RuneCountInString(u.FirstName) > USER_FIRST_NAME_MAX_RUNES { + if utf8.RuneCountInString(u.FirstName) > UserFirstNameMaxRunes { return InvalidUserError("first_name", u.Id) } - if utf8.RuneCountInString(u.LastName) > USER_LAST_NAME_MAX_RUNES { + if utf8.RuneCountInString(u.LastName) > UserLastNameMaxRunes { return InvalidUserError("last_name", u.Id) } - if u.AuthData != nil && len(*u.AuthData) > USER_AUTH_DATA_MAX_LENGTH { + if u.AuthData != nil && len(*u.AuthData) > UserAuthDataMaxLength { return InvalidUserError("auth_data", u.Id) } - if u.AuthData != nil && len(*u.AuthData) > 0 && len(u.AuthService) == 0 { + if u.AuthData != nil && *u.AuthData != "" && u.AuthService == "" { return InvalidUserError("auth_data_type", u.Id) } - if len(u.Password) > 0 && u.AuthData != nil && len(*u.AuthData) > 0 { + if u.Password != "" && u.AuthData != nil && *u.AuthData != "" { return InvalidUserError("auth_data_pwd", u.Id) } - if len(u.Password) > USER_PASSWORD_MAX_LENGTH { + if len(u.Password) > UserPasswordMaxLength { return InvalidUserError("password_limit", u.Id) } @@ -295,6 +324,19 @@ func (u *User) IsValid() *AppError { return InvalidUserError("locale", u.Id) } + if len(u.Timezone) > 0 { + if tzJSON, err := json.Marshal(u.Timezone); err != nil { + return NewAppError("User.IsValid", "model.user.is_valid.marshal.app_error", nil, err.Error(), http.StatusInternalServerError) + } else if utf8.RuneCount(tzJSON) > UserTimezoneMaxRunes { + return InvalidUserError("timezone_limit", u.Id) + } + } + + if len(u.Roles) > UserRolesMaxLength { + return NewAppError("User.IsValid", "model.user.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "user_id="+u.Id, http.StatusBadRequest) + } 
+ return nil } @@ -347,7 +389,7 @@ func (u *User) PreSave() { u.MfaActive = false if u.Locale == "" { - u.Locale = DEFAULT_LOCALE + u.Locale = DefaultLocale } if u.Props == nil { @@ -362,11 +404,28 @@ func (u *User) PreSave() { u.Timezone = timezones.DefaultUserTimezone() } - if len(u.Password) > 0 { + if u.Password != "" { u.Password = HashPassword(u.Password) } } +// The following are some GraphQL methods necessary to return the +// data in float64 type. The spec doesn't support 64 bit integers, +// so we have to pass the data in float64. The _ at the end is +// a hack to keep the attribute name same in GraphQL schema. + +func (u *User) CreateAt_() float64 { + return float64(u.CreateAt) +} + +func (u *User) DeleteAt_() float64 { + return float64(u.DeleteAt) +} + +func (u *User) LastPictureUpdateAt() float64 { + return float64(u.LastPictureUpdate) +} + // PreUpdate should be run before updating the user in the db. func (u *User) PreUpdate() { u.Username = SanitizeUnicode(u.Username) @@ -390,30 +449,33 @@ func (u *User) PreUpdate() { if u.NotifyProps == nil || len(u.NotifyProps) == 0 { u.SetDefaultNotifications() - } else if _, ok := u.NotifyProps[MENTION_KEYS_NOTIFY_PROP]; ok { + } else if _, ok := u.NotifyProps[MentionKeysNotifyProp]; ok { // Remove any blank mention keys - splitKeys := strings.Split(u.NotifyProps[MENTION_KEYS_NOTIFY_PROP], ",") + splitKeys := strings.Split(u.NotifyProps[MentionKeysNotifyProp], ",") goodKeys := []string{} for _, key := range splitKeys { - if len(key) > 0 { + if key != "" { goodKeys = append(goodKeys, strings.ToLower(key)) } } - u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = strings.Join(goodKeys, ",") + u.NotifyProps[MentionKeysNotifyProp] = strings.Join(goodKeys, ",") } } func (u *User) SetDefaultNotifications() { u.NotifyProps = make(map[string]string) - u.NotifyProps[EMAIL_NOTIFY_PROP] = "true" - u.NotifyProps[PUSH_NOTIFY_PROP] = USER_NOTIFY_MENTION - u.NotifyProps[DESKTOP_NOTIFY_PROP] = USER_NOTIFY_MENTION - u.NotifyProps[DESKTOP_SOUND_NOTIFY_PROP] = "true" - u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = "" - u.NotifyProps[CHANNEL_MENTIONS_NOTIFY_PROP] = "true" - u.NotifyProps[PUSH_STATUS_NOTIFY_PROP] = STATUS_AWAY - u.NotifyProps[COMMENTS_NOTIFY_PROP] = COMMENTS_NOTIFY_NEVER - u.NotifyProps[FIRST_NAME_NOTIFY_PROP] = "false" + u.NotifyProps[EmailNotifyProp] = "true" + u.NotifyProps[PushNotifyProp] = UserNotifyMention + u.NotifyProps[DesktopNotifyProp] = UserNotifyMention + u.NotifyProps[DesktopSoundNotifyProp] = "true" + u.NotifyProps[MentionKeysNotifyProp] = "" + u.NotifyProps[ChannelMentionsNotifyProp] = "true" + u.NotifyProps[PushStatusNotifyProp] = StatusAway + u.NotifyProps[CommentsNotifyProp] = CommentsNotifyNever + u.NotifyProps[FirstNameNotifyProp] = "false" + u.NotifyProps[DesktopThreadsNotifyProp] = UserNotifyAll + u.NotifyProps[EmailThreadsNotifyProp] = UserNotifyAll + u.NotifyProps[PushThreadsNotifyProp] = UserNotifyAll } func (u *User) UpdateMentionKeysFromUsername(oldUsername string) { @@ -424,16 +486,16 @@ func (u *User) UpdateMentionKeysFromUsername(oldUsername string) { } } - u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = "" + u.NotifyProps[MentionKeysNotifyProp] = "" if len(nonUsernameKeys) > 0 { - u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] += "," + strings.Join(nonUsernameKeys, ",") + u.NotifyProps[MentionKeysNotifyProp] += "," + strings.Join(nonUsernameKeys, ",") } } func (u *User) GetMentionKeys() []string { var keys []string - for _, key := range strings.Split(u.NotifyProps[MENTION_KEYS_NOTIFY_PROP], ",") { + for _, key := range 
strings.Split(u.NotifyProps[MentionKeysNotifyProp], ",") { trimmedKey := strings.TrimSpace(key) if trimmedKey == "" { @@ -486,22 +548,10 @@ func (u *User) Patch(patch *UserPatch) { if patch.Timezone != nil { u.Timezone = patch.Timezone } -} -// ToJson convert a User to a json string -func (u *User) ToJson() string { - b, _ := json.Marshal(u) - return string(b) -} - -func (u *UserPatch) ToJson() string { - b, _ := json.Marshal(u) - return string(b) -} - -func (u *UserAuth) ToJson() string { - b, _ := json.Marshal(u) - return string(b) + if patch.RemoteId != nil { + u.RemoteId = patch.RemoteId + } } // Generate a valid strong etag so the browser can cache the results @@ -535,13 +585,14 @@ func (u *User) SanitizeInput(isAdmin bool) { if !isAdmin { u.AuthData = NewString("") u.AuthService = "" + u.EmailVerified = false } u.LastPasswordUpdate = 0 u.LastPictureUpdate = 0 u.FailedAttempts = 0 - u.EmailVerified = false u.MfaActive = false u.MfaSecret = "" + u.Email = strings.TrimSpace(u.Email) } func (u *User) ClearNonProfileFields() { @@ -577,12 +628,45 @@ func (u *User) AddNotifyProp(key string, value string) { u.NotifyProps[key] = value } +func (u *User) SetCustomStatus(cs *CustomStatus) error { + u.MakeNonNil() + statusJSON, jsonErr := json.Marshal(cs) + if jsonErr != nil { + return jsonErr + } + u.Props[UserPropsKeyCustomStatus] = string(statusJSON) + return nil +} + +func (u *User) GetCustomStatus() *CustomStatus { + var o *CustomStatus + + data := u.Props[UserPropsKeyCustomStatus] + _ = json.Unmarshal([]byte(data), &o) + + return o +} + +func (u *User) CustomStatus() *CustomStatus { + var o *CustomStatus + + data := u.Props[UserPropsKeyCustomStatus] + _ = json.Unmarshal([]byte(data), &o) + + return o +} + +func (u *User) ClearCustomStatus() { + u.MakeNonNil() + u.Props[UserPropsKeyCustomStatus] = "" +} + func (u *User) GetFullName() string { - if len(u.FirstName) > 0 && len(u.LastName) > 0 { + if u.FirstName != "" && u.LastName != "" { return u.FirstName + " " + u.LastName - } else if len(u.FirstName) > 0 { + } else if u.FirstName != "" { return u.FirstName - } else if len(u.LastName) > 0 { + } else if u.LastName != "" { return u.LastName } else { return "" @@ -592,14 +676,14 @@ func (u *User) GetFullName() string { func (u *User) getDisplayName(baseName, nameFormat string) string { displayName := baseName - if nameFormat == SHOW_NICKNAME_FULLNAME { - if len(u.Nickname) > 0 { + if nameFormat == ShowNicknameFullName { + if u.Nickname != "" { displayName = u.Nickname - } else if fullName := u.GetFullName(); len(fullName) > 0 { + } else if fullName := u.GetFullName(); fullName != "" { displayName = fullName } - } else if nameFormat == SHOW_FULLNAME { - if fullName := u.GetFullName(); len(fullName) > 0 { + } else if nameFormat == ShowFullName { + if fullName := u.GetFullName(); fullName != "" { displayName = fullName } } @@ -645,23 +729,23 @@ func IsValidUserRoles(userRoles string) bool { return true } -// Make sure you acually want to use this function. In context.go there are functions to check permissions +// Make sure you actually want to use this function. In context.go there are functions to check permissions // This function should not be used to check permissions. func (u *User) IsGuest() bool { - return IsInRole(u.Roles, SYSTEM_GUEST_ROLE_ID) + return IsInRole(u.Roles, SystemGuestRoleId) } func (u *User) IsSystemAdmin() bool { - return IsInRole(u.Roles, SYSTEM_ADMIN_ROLE_ID) + return IsInRole(u.Roles, SystemAdminRoleId) } -// Make sure you acually want to use this function. 
In context.go there are functions to check permissions +// Make sure you actually want to use this function. In context.go there are functions to check permissions // This function should not be used to check permissions. func (u *User) IsInRole(inRole string) bool { return IsInRole(u.Roles, inRole) } -// Make sure you acually want to use this function. In context.go there are functions to check permissions +// Make sure you actually want to use this function. In context.go there are functions to check permissions // This function should not be used to check permissions. func IsInRole(userRoles string, inRole string) bool { roles := strings.Split(userRoles, " ") @@ -676,64 +760,81 @@ func IsInRole(userRoles string, inRole string) bool { } func (u *User) IsSSOUser() bool { - return u.AuthService != "" && u.AuthService != USER_AUTH_SERVICE_EMAIL + return u.AuthService != "" && u.AuthService != UserAuthServiceEmail } func (u *User) IsOAuthUser() bool { - return u.AuthService == USER_AUTH_SERVICE_GITLAB + return u.AuthService == ServiceGitlab || + u.AuthService == ServiceGoogle || + u.AuthService == ServiceOffice365 || + u.AuthService == ServiceOpenid } func (u *User) IsLDAPUser() bool { - return u.AuthService == USER_AUTH_SERVICE_LDAP + return u.AuthService == UserAuthServiceLdap } func (u *User) IsSAMLUser() bool { - return u.AuthService == USER_AUTH_SERVICE_SAML + return u.AuthService == UserAuthServiceSaml } func (u *User) GetPreferredTimezone() string { return GetPreferredTimezone(u.Timezone) } -// UserFromJson will decode the input and return a User -func UserFromJson(data io.Reader) *User { - var user *User - json.NewDecoder(data).Decode(&user) - return user -} - -func UserPatchFromJson(data io.Reader) *UserPatch { - var user *UserPatch - json.NewDecoder(data).Decode(&user) - return user +// IsRemote returns true if the user belongs to a remote cluster (has RemoteId). +func (u *User) IsRemote() bool { + return u.RemoteId != nil && *u.RemoteId != "" } -func UserAuthFromJson(data io.Reader) *UserAuth { - var user *UserAuth - json.NewDecoder(data).Decode(&user) - return user +// GetRemoteID returns the remote id for this user or "" if not a remote user. +func (u *User) GetRemoteID() string { + if u.RemoteId != nil { + return *u.RemoteId + } + return "" } -func UserMapToJson(u map[string]*User) string { - b, _ := json.Marshal(u) - return string(b) +// GetProp fetches a prop value by name. +func (u *User) GetProp(name string) (string, bool) { + val, ok := u.Props[name] + return val, ok } -func UserMapFromJson(data io.Reader) map[string]*User { - var users map[string]*User - json.NewDecoder(data).Decode(&users) - return users +// SetProp sets a prop value by name, creating the map if nil. +// Not thread safe. 
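+//
+// Illustrative usage (the prop key below is an arbitrary example, not a
+// predefined constant):
+//
+//	u.SetProp("favorite_color", "blue")
+//	color, ok := u.GetProp("favorite_color")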
+func (u *User) SetProp(name string, value string) { + if u.Props == nil { + u.Props = make(map[string]string) + } + u.Props[name] = value } -func UserListToJson(u []*User) string { - b, _ := json.Marshal(u) - return string(b) +func (u *User) ToPatch() *UserPatch { + return &UserPatch{ + Username: &u.Username, Password: &u.Password, + Nickname: &u.Nickname, FirstName: &u.FirstName, LastName: &u.LastName, + Position: &u.Position, Email: &u.Email, + Props: u.Props, NotifyProps: u.NotifyProps, + Locale: &u.Locale, Timezone: u.Timezone, + } } -func UserListFromJson(data io.Reader) []*User { - var users []*User - json.NewDecoder(data).Decode(&users) - return users +func (u *UserPatch) SetField(fieldName string, fieldValue string) { + switch fieldName { + case "FirstName": + u.FirstName = &fieldValue + case "LastName": + u.LastName = &fieldValue + case "Nickname": + u.Nickname = &fieldValue + case "Email": + u.Email = &fieldValue + case "Position": + u.Position = &fieldValue + case "Username": + u.Username = &fieldValue + } } // HashPassword generates a hash using the bcrypt.GenerateFromPassword @@ -746,28 +847,18 @@ func HashPassword(password string) string { return string(hash) } -// ComparePassword compares the hash -func ComparePassword(hash string, password string) bool { - - if len(password) == 0 || len(hash) == 0 { - return false - } - - err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) - return err == nil -} - var validUsernameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`) +var validUsernameCharsForRemote = regexp.MustCompile(`^[a-z0-9\.\-_:]+$`) -var restrictedUsernames = []string{ - "all", - "channel", - "matterbot", - "system", +var restrictedUsernames = map[string]struct{}{ + "all": {}, + "channel": {}, + "matterbot": {}, + "system": {}, } func IsValidUsername(s string) bool { - if len(s) < USER_NAME_MIN_LENGTH || len(s) > USER_NAME_MAX_LENGTH { + if len(s) < UserNameMinLength || len(s) > UserNameMaxLength { return false } @@ -775,17 +866,25 @@ func IsValidUsername(s string) bool { return false } - for _, restrictedUsername := range restrictedUsernames { - if s == restrictedUsername { - return false - } + _, found := restrictedUsernames[s] + return !found +} + +func IsValidUsernameAllowRemote(s string) bool { + if len(s) < UserNameMinLength || len(s) > UserNameMaxLength { + return false } - return true + if !validUsernameCharsForRemote.MatchString(s) { + return false + } + + _, found := restrictedUsernames[s] + return !found } -func CleanUsername(s string) string { - s = NormalizeUsername(strings.Replace(s, " ", "-", -1)) +func CleanUsername(username string) string { + s := NormalizeUsername(strings.Replace(username, " ", "-", -1)) for _, value := range reservedName { if s == value { @@ -806,38 +905,16 @@ func CleanUsername(s string) string { if !IsValidUsername(s) { s = "a" + NewId() + mlog.Warn("Generating new username since provided username was invalid", + mlog.String("provided_username", username), mlog.String("new_username", s)) } return s } -func IsValidUserNotifyLevel(notifyLevel string) bool { - return notifyLevel == CHANNEL_NOTIFY_ALL || - notifyLevel == CHANNEL_NOTIFY_MENTION || - notifyLevel == CHANNEL_NOTIFY_NONE -} - -func IsValidPushStatusNotifyLevel(notifyLevel string) bool { - return notifyLevel == STATUS_ONLINE || - notifyLevel == STATUS_AWAY || - notifyLevel == STATUS_OFFLINE -} - -func IsValidCommentsNotifyLevel(notifyLevel string) bool { - return notifyLevel == COMMENTS_NOTIFY_ANY || - notifyLevel == COMMENTS_NOTIFY_ROOT || - notifyLevel == 
COMMENTS_NOTIFY_NEVER -} - -func IsValidEmailBatchingInterval(emailInterval string) bool { - return emailInterval == PREFERENCE_EMAIL_INTERVAL_IMMEDIATELY || - emailInterval == PREFERENCE_EMAIL_INTERVAL_FIFTEEN || - emailInterval == PREFERENCE_EMAIL_INTERVAL_HOUR -} - func IsValidLocale(locale string) bool { if locale != "" { - if len(locale) > USER_LOCALE_MAX_LENGTH { + if len(locale) > UserLocaleMaxLength { return false } else if _, err := language.Parse(locale); err != nil { return false @@ -847,6 +924,7 @@ func IsValidLocale(locale string) bool { return true } +//msgp:ignore UserWithGroups type UserWithGroups struct { User GroupIDs *string `json:"-"` @@ -861,44 +939,14 @@ func (u *UserWithGroups) GetGroupIDs() []string { return nil } trimmed := strings.TrimSpace(*u.GroupIDs) - if len(trimmed) == 0 { + if trimmed == "" { return nil } return strings.Split(trimmed, ",") } +//msgp:ignore UsersWithGroupsAndCount type UsersWithGroupsAndCount struct { Users []*UserWithGroups `json:"users"` Count int64 `json:"total_count"` } - -func UsersWithGroupsAndCountFromJson(data io.Reader) *UsersWithGroupsAndCount { - uwg := &UsersWithGroupsAndCount{} - bodyBytes, _ := ioutil.ReadAll(data) - json.Unmarshal(bodyBytes, uwg) - return uwg -} - -var passwordRandomSource = rand.NewSource(time.Now().Unix()) -var passwordSpecialChars = "!$%^&*(),." -var passwordNumbers = "0123456789" -var passwordUpperCaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -var passwordLowerCaseLetters = "abcdefghijklmnopqrstuvwxyz" -var passwordAllChars = passwordSpecialChars + passwordNumbers + passwordUpperCaseLetters + passwordLowerCaseLetters - -func GeneratePassword(minimumLength int) string { - r := rand.New(passwordRandomSource) - - // Make sure we are guaranteed at least one of each type to meet any possible password complexity requirements. 
- password := string([]rune(passwordUpperCaseLetters)[r.Intn(len(passwordUpperCaseLetters))]) + - string([]rune(passwordNumbers)[r.Intn(len(passwordNumbers))]) + - string([]rune(passwordLowerCaseLetters)[r.Intn(len(passwordLowerCaseLetters))]) + - string([]rune(passwordSpecialChars)[r.Intn(len(passwordSpecialChars))]) - - for len(password) < minimumLength { - i := r.Intn(len(passwordAllChars)) - password = password + string([]rune(passwordAllChars)[i]) - } - - return password -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token.go similarity index 70% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token.go index f458a6d9..dee31f18 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token.go @@ -4,8 +4,6 @@ package model import ( - "encoding/json" - "io" "net/http" ) @@ -41,25 +39,3 @@ func (t *UserAccessToken) PreSave() { t.Id = NewId() t.IsActive = true } - -func (t *UserAccessToken) ToJson() string { - b, _ := json.Marshal(t) - return string(b) -} - -func UserAccessTokenFromJson(data io.Reader) *UserAccessToken { - var t *UserAccessToken - json.NewDecoder(data).Decode(&t) - return t -} - -func UserAccessTokenListToJson(t []*UserAccessToken) string { - b, _ := json.Marshal(t) - return string(b) -} - -func UserAccessTokenListFromJson(data io.Reader) []*UserAccessToken { - var t []*UserAccessToken - json.NewDecoder(data).Decode(&t) - return t -} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token_search.go similarity index 66% rename from vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token_search.go index d5f98f1a..97fcde12 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_access_token_search.go @@ -3,6 +3,6 @@ package model -const ( - USER_AUTH_SERVICE_LDAP = "ldap" -) +type UserAccessTokenSearch struct { + Term string `json:"term"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_autocomplete.go new file mode 100644 index 00000000..b07131b3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_autocomplete.go @@ -0,0 +1,18 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
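// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the upstream patch).
// A recurring theme in this v5-to-v6 bump, visible in the user_access_token.go
// hunk above, is that the per-type ToJson/FromJson helpers are deleted and
// callers are expected to use encoding/json directly. A minimal sketch of the
// replacement pattern; the function name and error handling here are ours:
func exampleAccessTokenJSON(r io.Reader) ([]byte, *UserAccessToken, error) {
	t := &UserAccessToken{UserId: NewId(), Description: "ci token"}
	b, err := json.Marshal(t) // replaces the removed t.ToJson()
	if err != nil {
		return nil, nil, err
	}
	var decoded UserAccessToken // replaces the removed UserAccessTokenFromJson(r)
	if err := json.NewDecoder(r).Decode(&decoded); err != nil {
		return b, nil, err
	}
	return b, &decoded, nil
}
// ---------------------------------------------------------------------------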
+ +package model + +type UserAutocompleteInChannel struct { + InChannel []*User `json:"in_channel"` + OutOfChannel []*User `json:"out_of_channel"` +} + +type UserAutocompleteInTeam struct { + InTeam []*User `json:"in_team"` +} + +type UserAutocomplete struct { + Users []*User `json:"users"` + OutOfChannel []*User `json:"out_of_channel,omitempty"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_count.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_count.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go index 2748d735..0ba62f3f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go @@ -14,6 +14,8 @@ type UserGetOptions struct { NotInChannelId string // Filters the users in the group InGroupId string + // Filters the users not in the group + NotInGroupId string // Filters the users group constrained GroupConstrained bool // Filters the users without a team diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go index 0a721eac..d0480fe5 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go @@ -3,13 +3,8 @@ package model -import ( - "encoding/json" - "io" -) - -const USER_SEARCH_MAX_LIMIT = 1000 -const USER_SEARCH_DEFAULT_LIMIT = 100 +const UserSearchMaxLimit = 1000 +const UserSearchDefaultLimit = 100 // UserSearch captures the parameters provided by a client for initiating a user search. type UserSearch struct { @@ -27,24 +22,7 @@ type UserSearch struct { Roles []string `json:"roles"` ChannelRoles []string `json:"channel_roles"` TeamRoles []string `json:"team_roles"` -} - -// ToJson convert a User to a json string -func (u *UserSearch) ToJson() []byte { - b, _ := json.Marshal(u) - return b -} - -// UserSearchFromJson will decode the input and return a User -func UserSearchFromJson(data io.Reader) *UserSearch { - us := UserSearch{} - json.NewDecoder(data).Decode(&us) - - if us.Limit == 0 { - us.Limit = USER_SEARCH_DEFAULT_LIMIT - } - - return &us + NotInGroupId string `json:"not_in_group_id"` } // UserSearchOptions captures internal parameters derived from the user's permissions and a diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user_serial_gen.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_serial_gen.go new file mode 100644 index 00000000..3bcb3cf7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_serial_gen.go @@ -0,0 +1,842 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
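// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the upstream patch).
// The generated code below gives User a tinylib/msgp codec that encodes to a
// fixed 33-element msgpack array (the 0xdc, 0x0, 0x21 header bytes appended
// in EncodeMsg). A minimal round trip using the generated methods; the
// function name is ours:
func exampleUserMsgpRoundTrip(u *User) (*User, error) {
	raw, err := u.MarshalMsg(nil) // appends msgpack bytes to a nil buffer
	if err != nil {
		return nil, err
	}
	var back User
	// UnmarshalMsg returns any leftover bytes, which a single-value round
	// trip can ignore.
	if _, err := back.UnmarshalMsg(raw); err != nil {
		return nil, err
	}
	return &back, nil
}
// ---------------------------------------------------------------------------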
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *User) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 33 { + err = msgp.ArrayError{Wanted: 33, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.Username, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + err = z.Props.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = z.NotifyProps.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + z.LastPasswordUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + err = z.Timezone.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + z.MfaActive, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "RemoteId") + return + } + z.RemoteId = nil + } else { + if z.RemoteId == nil { + z.RemoteId = new(string) + } + *z.RemoteId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "RemoteId") + return 
+ } + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + z.DisableWelcomeEmail, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "DisableWelcomeEmail") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *User) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 33 + err = en.Append(0xdc, 0x0, 0x21) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.UpdateAt) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + err = en.WriteString(z.Username) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + err = en.WriteString(z.Password) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if z.AuthData == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteString(*z.AuthData) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + err = en.WriteString(z.AuthService) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + err = en.WriteString(z.Email) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + err = en.WriteBool(z.EmailVerified) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + err = en.WriteString(z.Nickname) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + err = en.WriteString(z.FirstName) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + err = en.WriteString(z.LastName) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + err = en.WriteString(z.Position) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.AllowMarketing) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + err = z.Props.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = z.NotifyProps.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + err = en.WriteInt64(z.LastPasswordUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + err = en.WriteInt64(z.LastPictureUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + err = en.WriteInt(z.FailedAttempts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + err = en.WriteString(z.Locale) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + err = z.Timezone.EncodeMsg(en) + 
if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + err = en.WriteBool(z.MfaActive) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + err = en.WriteString(z.MfaSecret) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + if z.RemoteId == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteString(*z.RemoteId) + if err != nil { + err = msgp.WrapError(err, "RemoteId") + return + } + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteBool(z.IsBot) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + err = en.WriteString(z.BotDescription) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + err = en.WriteInt64(z.BotLastIconUpdate) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + err = en.WriteString(z.TermsOfServiceId) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + err = en.WriteInt64(z.TermsOfServiceCreateAt) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + err = en.WriteBool(z.DisableWelcomeEmail) + if err != nil { + err = msgp.WrapError(err, "DisableWelcomeEmail") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *User) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 33 + o = append(o, 0xdc, 0x0, 0x21) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.UpdateAt) + o = msgp.AppendInt64(o, z.DeleteAt) + o = msgp.AppendString(o, z.Username) + o = msgp.AppendString(o, z.Password) + if z.AuthData == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendString(o, *z.AuthData) + } + o = msgp.AppendString(o, z.AuthService) + o = msgp.AppendString(o, z.Email) + o = msgp.AppendBool(o, z.EmailVerified) + o = msgp.AppendString(o, z.Nickname) + o = msgp.AppendString(o, z.FirstName) + o = msgp.AppendString(o, z.LastName) + o = msgp.AppendString(o, z.Position) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.AllowMarketing) + o, err = z.Props.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + o, err = z.NotifyProps.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + o = msgp.AppendInt64(o, z.LastPasswordUpdate) + o = msgp.AppendInt64(o, z.LastPictureUpdate) + o = msgp.AppendInt(o, z.FailedAttempts) + o = msgp.AppendString(o, z.Locale) + o, err = z.Timezone.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + o = msgp.AppendBool(o, z.MfaActive) + o = msgp.AppendString(o, z.MfaSecret) + if z.RemoteId == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendString(o, *z.RemoteId) + } + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendBool(o, z.IsBot) + o = msgp.AppendString(o, z.BotDescription) + o = msgp.AppendInt64(o, z.BotLastIconUpdate) + o = msgp.AppendString(o, z.TermsOfServiceId) + o = msgp.AppendInt64(o, z.TermsOfServiceCreateAt) + o = msgp.AppendBool(o, z.DisableWelcomeEmail) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *User) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 33 { + err = msgp.ArrayError{Wanted: 33, Got: zb0001} + return + } + z.Id, 
bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.Username, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + bts, err = z.Props.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + bts, err = z.NotifyProps.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + z.LastPasswordUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + bts, err = z.Timezone.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + z.MfaActive, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.RemoteId = nil + } else { + if z.RemoteId == nil { + z.RemoteId = new(string) + } + *z.RemoteId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RemoteId") + return + } + } + 
z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + z.DisableWelcomeEmail, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DisableWelcomeEmail") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *User) Msgsize() (s int) { + s = 3 + msgp.StringPrefixSize + len(z.Id) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.Username) + msgp.StringPrefixSize + len(z.Password) + if z.AuthData == nil { + s += msgp.NilSize + } else { + s += msgp.StringPrefixSize + len(*z.AuthData) + } + s += msgp.StringPrefixSize + len(z.AuthService) + msgp.StringPrefixSize + len(z.Email) + msgp.BoolSize + msgp.StringPrefixSize + len(z.Nickname) + msgp.StringPrefixSize + len(z.FirstName) + msgp.StringPrefixSize + len(z.LastName) + msgp.StringPrefixSize + len(z.Position) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + z.Props.Msgsize() + z.NotifyProps.Msgsize() + msgp.Int64Size + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.Locale) + z.Timezone.Msgsize() + msgp.BoolSize + msgp.StringPrefixSize + len(z.MfaSecret) + if z.RemoteId == nil { + s += msgp.NilSize + } else { + s += msgp.StringPrefixSize + len(*z.RemoteId) + } + s += msgp.Int64Size + msgp.BoolSize + msgp.StringPrefixSize + len(z.BotDescription) + msgp.Int64Size + msgp.StringPrefixSize + len(z.TermsOfServiceId) + msgp.Int64Size + msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *UserMap) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(UserMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + zb0003-- + var zb0001 string + var zb0002 *User + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + zb0002 = nil + } else { + if zb0002 == nil { + zb0002 = new(User) + } + err = zb0002.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + (*z)[zb0001] = zb0002 + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z UserMap) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteMapHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0004, zb0005 := range z { + err = en.WriteString(zb0004) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0005 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = zb0005.EncodeMsg(en) + if err != nil { + err = 
msgp.WrapError(err, zb0004) + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z UserMap) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendMapHeader(o, uint32(len(z))) + for zb0004, zb0005 := range z { + o = msgp.AppendString(o, zb0004) + if zb0005 == nil { + o = msgp.AppendNil(o) + } else { + o, err = zb0005.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *UserMap) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(UserMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + var zb0001 string + var zb0002 *User + zb0003-- + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + zb0002 = nil + } else { + if zb0002 == nil { + zb0002 = new(User) + } + bts, err = zb0002.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + (*z)[zb0001] = zb0002 + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UserMap) Msgsize() (s int) { + s = msgp.MapHeaderSize + if z != nil { + for zb0004, zb0005 := range z { + _ = zb0005 + s += msgp.StringPrefixSize + len(zb0004) + if zb0005 == nil { + s += msgp.NilSize + } else { + s += zb0005.Msgsize() + } + } + } + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_terms_of_service.go similarity index 79% rename from vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/user_terms_of_service.go index 9a0f4f18..880c0786 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_terms_of_service.go @@ -4,9 +4,7 @@ package model import ( - "encoding/json" "fmt" - "io" "net/http" ) @@ -32,11 +30,6 @@ func (ut *UserTermsOfService) IsValid() *AppError { return nil } -func (ut *UserTermsOfService) ToJson() string { - b, _ := json.Marshal(ut) - return string(b) -} - func (ut *UserTermsOfService) PreSave() { if ut.UserId == "" { ut.UserId = NewId() @@ -45,12 +38,6 @@ func (ut *UserTermsOfService) PreSave() { ut.CreateAt = GetMillis() } -func UserTermsOfServiceFromJson(data io.Reader) *UserTermsOfService { - var userTermsOfService *UserTermsOfService - json.NewDecoder(data).Decode(&userTermsOfService) - return userTermsOfService -} - func InvalidUserTermsOfServiceError(fieldName string, userTermsOfServiceId string) *AppError { id := fmt.Sprintf("model.user_terms_of_service.is_valid.%s.app_error", fieldName) details := "" diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/users_stats.go b/vendor/github.com/mattermost/mattermost-server/v6/model/users_stats.go new file mode 100644 index 00000000..1a7becf2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/users_stats.go @@ -0,0 +1,8 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See LICENSE.txt for license information. + +package model + +type UsersStats struct { + TotalUsersCount int64 `json:"total_users_count"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go similarity index 61% rename from vendor/github.com/mattermost/mattermost-server/v5/model/utils.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/utils.go index e75fb022..636e707e 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go @@ -6,6 +6,7 @@ package model import ( "bytes" "crypto/rand" + "database/sql/driver" "encoding/base32" "encoding/json" "fmt" @@ -16,26 +17,48 @@ import ( "net/mail" "net/url" "regexp" - "strconv" + "sort" "strings" + "sync" "time" "unicode" - goi18n "github.com/mattermost/go-i18n/i18n" + "github.com/mattermost/mattermost-server/v6/shared/i18n" "github.com/pborman/uuid" + "github.com/pkg/errors" ) const ( - LOWERCASE_LETTERS = "abcdefghijklmnopqrstuvwxyz" - UPPERCASE_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - NUMBERS = "0123456789" - SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~" + LowercaseLetters = "abcdefghijklmnopqrstuvwxyz" + UppercaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + NUMBERS = "0123456789" + SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~" + BinaryParamKey = "MM_BINARY_PARAMETERS" ) type StringInterface map[string]interface{} -type StringMap map[string]string type StringArray []string +func (sa StringArray) Remove(input string) StringArray { + for index := range sa { + if sa[index] == input { + ret := make(StringArray, 0, len(sa)-1) + ret = append(ret, sa[:index]...) + return append(ret, sa[index+1:]...) + } + } + return sa +} + +func (sa StringArray) Contains(input string) bool { + for index := range sa { + if sa[index] == input { + return true + } + } + + return false +} func (sa StringArray) Equals(input StringArray) bool { if len(sa) != len(input) { @@ -52,10 +75,124 @@ func (sa StringArray) Equals(input StringArray) bool { return true } -var translateFunc goi18n.TranslateFunc = nil +// Value converts StringArray to database value +func (sa StringArray) Value() (driver.Value, error) { + j, err := json.Marshal(sa) + if err != nil { + return nil, err + } + // non utf8 characters are not supported https://mattermost.atlassian.net/browse/MM-41066 + return string(j), err +} + +// Scan converts database column value to StringArray +func (sa *StringArray) Scan(value interface{}) error { + if value == nil { + return nil + } + + buf, ok := value.([]byte) + if ok { + return json.Unmarshal(buf, sa) + } + + str, ok := value.(string) + if ok { + return json.Unmarshal([]byte(str), sa) + } -func AppErrorInit(t goi18n.TranslateFunc) { - translateFunc = t + return errors.New("received value is neither a byte slice nor string") +} + +// Scan converts database column value to StringMap +func (m *StringMap) Scan(value interface{}) error { + if value == nil { + return nil + } + + buf, ok := value.([]byte) + if ok { + return json.Unmarshal(buf, m) + } + + str, ok := value.(string) + if ok { + return json.Unmarshal([]byte(str), m) + } + + return errors.New("received value is neither a byte slice nor string") +} + +// Value converts StringMap to database value +func (m StringMap) Value() (driver.Value, error) { + ok := m[BinaryParamKey] + delete(m, BinaryParamKey) + buf, err := json.Marshal(m) + if err != nil { + return nil, err + } + if ok == "true" { + return 
append([]byte{0x01}, buf...), nil + } else if ok == "false" { + return buf, nil + } + // Key wasn't found. We fall back to the default case. + return string(buf), nil +} + +func (StringMap) ImplementsGraphQLType(name string) bool { + return name == "StringMap" +} + +func (m StringMap) MarshalJSON() ([]byte, error) { + return json.Marshal((map[string]string)(m)) +} + +func (m *StringMap) UnmarshalGraphQL(input interface{}) error { + json, ok := input.(map[string]string) + if !ok { + return errors.New("wrong type") + } + + *m = json + return nil +} + +func (si *StringInterface) Scan(value interface{}) error { + if value == nil { + return nil + } + + buf, ok := value.([]byte) + if ok { + return json.Unmarshal(buf, si) + } + + str, ok := value.(string) + if ok { + return json.Unmarshal([]byte(str), si) + } + + return errors.New("received value is neither a byte slice nor string") +} + +// Value converts StringInterface to database value +func (si StringInterface) Value() (driver.Value, error) { + j, err := json.Marshal(si) + if err != nil { + return nil, err + } + // non utf8 characters are not supported https://mattermost.atlassian.net/browse/MM-41066 + return string(j), err +} + +var translateFunc i18n.TranslateFunc +var translateFuncOnce sync.Once + +func AppErrorInit(t i18n.TranslateFunc) { + translateFuncOnce.Do(func() { + translateFunc = t + }) } type AppError struct { @@ -73,7 +210,7 @@ func (er *AppError) Error() string { return er.Where + ": " + er.Message + ", " + er.DetailedError } -func (er *AppError) Translate(T goi18n.TranslateFunc) { +func (er *AppError) Translate(T i18n.TranslateFunc) { if T == nil { er.Message = er.Id return @@ -86,21 +223,20 @@ func (er *AppError) Translate(T goi18n.TranslateFunc) { } } -func (er *AppError) SystemMessage(T goi18n.TranslateFunc) string { +func (er *AppError) SystemMessage(T i18n.TranslateFunc) string { if er.params == nil { return T(er.Id) - } else { - return T(er.Id, er.params) } + return T(er.Id, er.params) } -func (er *AppError) ToJson() string { +func (er *AppError) ToJSON() string { b, _ := json.Marshal(er) return string(b) } -// AppErrorFromJson will decode the input and return an AppError -func AppErrorFromJson(data io.Reader) *AppError { +// AppErrorFromJSON will decode the input and return an AppError +func AppErrorFromJSON(data io.Reader) *AppError { str := "" bytes, rerr := ioutil.ReadAll(data) if rerr != nil { @@ -112,11 +248,10 @@ func AppErrorFromJson(data io.Reader) *AppError { decoder := json.NewDecoder(strings.NewReader(str)) var er AppError err := decoder.Decode(&er) - if err == nil { - return &er - } else { - return NewAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError) + if err != nil { + return NewAppError("AppErrorFromJSON", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError) } + return &er } func NewAppError(where string, id string, params map[string]interface{}, details string, status int) *AppError { @@ -163,14 +298,6 @@ func NewRandomString(length int) string { return encoding.EncodeToString(data)[:length] } -// NewRandomBase32String returns a base32 encoded string of a random slice -// of bytes of the given size. The resulting entropy will be (8 * size) bits. -func NewRandomBase32String(size int) string { - data := make([]byte, size) - rand.Read(data) - return base32.StdEncoding.EncodeToString(data) -} - // GetMillis is a convenience method to get milliseconds since epoch. 
func GetMillis() int64 { return time.Now().UnixNano() / int64(time.Millisecond) @@ -181,6 +308,11 @@ func GetMillisForTime(thisTime time.Time) int64 { return thisTime.UnixNano() / int64(time.Millisecond) } +// GetTimeForMillis is a convenience method to get time.Time for milliseconds since epoch. +func GetTimeForMillis(millis int64) time.Time { + return time.Unix(0, millis*int64(time.Millisecond)) +} + // PadDateStringZeros is a convenience method to pad 2 digit date parts with zeros to meet ISO 8601 format func PadDateStringZeros(dateString string) string { parts := strings.Split(dateString, "-") @@ -208,63 +340,60 @@ func GetEndOfDayMillis(thisTime time.Time, timeZoneOffset int) int64 { } func CopyStringMap(originalMap map[string]string) map[string]string { - copyMap := make(map[string]string) + copyMap := make(map[string]string, len(originalMap)) for k, v := range originalMap { copyMap[k] = v } return copyMap } -// MapToJson converts a map to a json string -func MapToJson(objmap map[string]string) string { +// MapToJSON converts a map to a json string +func MapToJSON(objmap map[string]string) string { b, _ := json.Marshal(objmap) return string(b) } -// MapBoolToJson converts a map to a json string -func MapBoolToJson(objmap map[string]bool) string { +// MapBoolToJSON converts a map to a json string +func MapBoolToJSON(objmap map[string]bool) string { b, _ := json.Marshal(objmap) return string(b) } -// MapFromJson will decode the key/value pair map -func MapFromJson(data io.Reader) map[string]string { +// MapFromJSON will decode the key/value pair map +func MapFromJSON(data io.Reader) map[string]string { decoder := json.NewDecoder(data) var objmap map[string]string if err := decoder.Decode(&objmap); err != nil { return make(map[string]string) - } else { - return objmap } + return objmap } -// MapFromJson will decode the key/value pair map -func MapBoolFromJson(data io.Reader) map[string]bool { +// MapFromJSON will decode the key/value pair map +func MapBoolFromJSON(data io.Reader) map[string]bool { decoder := json.NewDecoder(data) var objmap map[string]bool if err := decoder.Decode(&objmap); err != nil { return make(map[string]bool) - } else { - return objmap } + return objmap } -func ArrayToJson(objmap []string) string { +func ArrayToJSON(objmap []string) string { b, _ := json.Marshal(objmap) return string(b) } -func ArrayFromJson(data io.Reader) []string { +func ArrayFromJSON(data io.Reader) []string { decoder := json.NewDecoder(data) var objmap []string if err := decoder.Decode(&objmap); err != nil { return make([]string, 0) - } else { - return objmap } + return objmap } func ArrayFromInterface(data interface{}) []string { @@ -284,41 +413,30 @@ func ArrayFromInterface(data interface{}) []string { return stringArray } -func StringInterfaceToJson(objmap map[string]interface{}) string { +func StringInterfaceToJSON(objmap map[string]interface{}) string { b, _ := json.Marshal(objmap) return string(b) } -func StringInterfaceFromJson(data io.Reader) map[string]interface{} { +func StringInterfaceFromJSON(data io.Reader) map[string]interface{} { decoder := json.NewDecoder(data) var objmap map[string]interface{} if err := decoder.Decode(&objmap); err != nil { return make(map[string]interface{}) - } else { - return objmap } + return objmap } -func StringToJson(s string) string { - b, _ := json.Marshal(s) - return string(b) +// ToJSON serializes an arbitrary data type to JSON, discarding the error. 
+func ToJSON(v interface{}) []byte { + b, _ := json.Marshal(v) + return b } -func StringFromJson(data io.Reader) string { - decoder := json.NewDecoder(data) - - var s string - if err := decoder.Decode(&s); err != nil { - return "" - } else { - return s - } -} - -func GetServerIpAddress(iface string) string { +func GetServerIPAddress(iface string) string { var addrs []net.Addr - if len(iface) == 0 { + if iface == "" { var err error addrs, err = net.InterfaceAddrs() if err != nil { @@ -352,12 +470,12 @@ func GetServerIpAddress(iface string) string { return "" } -func IsLower(s string) bool { +func isLower(s string) bool { return strings.ToLower(s) == s } func IsValidEmail(email string) bool { - if !IsLower(email) { + if !isLower(email) { return false } @@ -377,6 +495,7 @@ var reservedName = []string{ "channel", "claim", "error", + "files", "help", "landing", "login", @@ -386,37 +505,37 @@ var reservedName = []string{ "plugins", "post", "signup", + "boards", + "playbooks", } func IsValidChannelIdentifier(s string) bool { - - if !IsValidAlphaNumHyphenUnderscore(s, true) { - return false - } - - if len(s) < CHANNEL_NAME_MIN_LENGTH { - return false - } - - return true + return validSimpleAlphaNum.MatchString(s) && len(s) >= ChannelNameMinLength } -func IsValidAlphaNum(s string) bool { - validAlphaNum := regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`) +var ( + validAlphaNum = regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`) + validAlphaNumHyphenUnderscore = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`) + validSimpleAlphaNum = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]*$`) + validSimpleAlphaNumHyphenUnderscore = regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`) + validSimpleAlphaNumHyphenUnderscorePlus = regexp.MustCompile(`^[a-zA-Z0-9+_-]+$`) +) +func isValidAlphaNum(s string) bool { return validAlphaNum.MatchString(s) } func IsValidAlphaNumHyphenUnderscore(s string, withFormat bool) bool { if withFormat { - validAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`) return validAlphaNumHyphenUnderscore.MatchString(s) } - - validSimpleAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`) return validSimpleAlphaNumHyphenUnderscore.MatchString(s) } +func IsValidAlphaNumHyphenUnderscorePlus(s string) bool { + return validSimpleAlphaNumHyphenUnderscorePlus.MatchString(s) +} + func Etag(parts ...interface{}) string { etag := CurrentVersion @@ -428,10 +547,12 @@ func Etag(parts ...interface{}) string { return etag } -var validHashtag = regexp.MustCompile(`^(#\pL[\pL\d\-_.]*[\pL\d])$`) -var puncStart = regexp.MustCompile(`^[^\pL\d\s#]+`) -var hashtagStart = regexp.MustCompile(`^#{2,}`) -var puncEnd = regexp.MustCompile(`[^\pL\d\s]+$`) +var ( + validHashtag = regexp.MustCompile(`^(#\pL[\pL\d\-_.]*[\pL\d])$`) + puncStart = regexp.MustCompile(`^[^\pL\d\s#]+`) + hashtagStart = regexp.MustCompile(`^#{2,}`) + puncEnd = regexp.MustCompile(`[^\pL\d\s]+$`) +) func ParseHashtags(text string) (string, string) { words := strings.Fields(text) @@ -466,87 +587,18 @@ func ParseHashtags(text string) (string, string) { return strings.TrimSpace(hashtagString), strings.TrimSpace(plainString) } -func IsFileExtImage(ext string) bool { - ext = strings.ToLower(ext) - for _, imgExt := range IMAGE_EXTENSIONS { - if ext == imgExt { - return true - } - } - return false -} - -func GetImageMimeType(ext string) string { - ext = strings.ToLower(ext) - if len(IMAGE_MIME_TYPES[ext]) == 0 { - return "image" - } else { - return 
IMAGE_MIME_TYPES[ext]
-	}
-}
-
 func ClearMentionTags(post string) string {
 	post = strings.Replace(post, "<mention>", "", -1)
 	post = strings.Replace(post, "</mention>", "", -1)
 	return post
 }
 
-func IsValidHttpUrl(rawUrl string) bool {
-	if strings.Index(rawUrl, "http://") != 0 && strings.Index(rawUrl, "https://") != 0 {
+func IsValidHTTPURL(rawURL string) bool {
+	if strings.Index(rawURL, "http://") != 0 && strings.Index(rawURL, "https://") != 0 {
 		return false
 	}
 
-	if _, err := url.ParseRequestURI(rawUrl); err != nil {
-		return false
-	}
-
-	return true
-}
-
-func IsValidTurnOrStunServer(rawUri string) bool {
-	if strings.Index(rawUri, "turn:") != 0 && strings.Index(rawUri, "stun:") != 0 {
-		return false
-	}
-
-	if _, err := url.ParseRequestURI(rawUri); err != nil {
-		return false
-	}
-
-	return true
-}
-
-func IsSafeLink(link *string) bool {
-	if link != nil {
-		if IsValidHttpUrl(*link) {
-			return true
-		} else if strings.HasPrefix(*link, "/") {
-			return true
-		} else {
-			return false
-		}
-	}
-
-	return true
-}
-
-func IsValidWebsocketUrl(rawUrl string) bool {
-	if strings.Index(rawUrl, "ws://") != 0 && strings.Index(rawUrl, "wss://") != 0 {
-		return false
-	}
-
-	if _, err := url.ParseRequestURI(rawUrl); err != nil {
-		return false
-	}
-
-	return true
-}
-
-func IsValidTrueOrFalseString(value string) bool {
-	return value == "true" || value == "false"
-}
-
-func IsValidNumberString(value string) bool {
-	if _, err := strconv.Atoi(value); err != nil {
+	if u, err := url.ParseRequestURI(rawURL); err != nil || u.Scheme == "" || u.Host == "" {
 		return false
 	}
 
@@ -567,73 +619,24 @@ func IsValidId(value string) bool {
 	return true
 }
 
-// Copied from https://golang.org/src/net/dnsclient.go#L119
-func IsDomainName(s string) bool {
-	// See RFC 1035, RFC 3696.
-	// Presentation format has dots before every label except the first, and the
-	// terminal empty label is optional here because we assume fully-qualified
-	// (absolute) input. We must therefore reserve space for the first and last
-	// labels' length octets in wire format, where they are necessary and the
-	// maximum total length is 255.
-	// So our _effective_ maximum is 253, but 254 is not rejected if the last
-	// character is a dot.
-	l := len(s)
-	if l == 0 || l > 254 || l == 254 && s[l-1] != '.' {
-		return false
-	}
-
-	last := byte('.')
-	ok := false // Ok once we've seen a letter.
-	partlen := 0
-	for i := 0; i < len(s); i++ {
-		c := s[i]
-		switch {
-		default:
-			return false
-		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':
-			ok = true
-			partlen++
-		case '0' <= c && c <= '9':
-			// fine
-			partlen++
-		case c == '-':
-			// Byte before dash cannot be dot.
-			if last == '.' {
-				return false
-			}
-			partlen++
-		case c == '.':
-			// Byte before dot cannot be dot, dash.
-			if last == '.' || last == '-' {
-				return false
-			}
-			if partlen > 63 || partlen == 0 {
-				return false
-			}
-			partlen = 0
-		}
-		last = c
-	}
-	if last == '-' || partlen > 63 {
-		return false
-	}
-
-	return ok
-}
-
+// RemoveDuplicateStrings does an in-place removal of duplicate strings
+// from the input slice. The original slice gets modified.
 func RemoveDuplicateStrings(in []string) []string {
-	out := []string{}
-	seen := make(map[string]bool, len(in))
-
-	for _, item := range in {
-		if !seen[item] {
-			out = append(out, item)
-
-			seen[item] = true
+	// In-place de-dup.
+ // Copied from https://github.com/golang/go/wiki/SliceTricks#in-place-deduplicate-comparable + if len(in) == 0 { + return in + } + sort.Strings(in) + j := 0 + for i := 1; i < len(in); i++ { + if in[j] == in[i] { + continue } + j++ + in[j] = in[i] } - - return out + return in[:j+1] } func GetPreferredTimezone(timezone StringMap) string { @@ -644,27 +647,14 @@ func GetPreferredTimezone(timezone StringMap) string { return timezone["manualTimezone"] } -// IsSamlFile checks if filename is a SAML file. -func IsSamlFile(saml *SamlSettings, filename string) bool { - return filename == *saml.PublicCertificateFile || filename == *saml.PrivateKeyFile || filename == *saml.IdpCertificateFile -} - -func AsStringBoolMap(list []string) map[string]bool { - listMap := map[string]bool{} - for _, p := range list { - listMap[p] = true - } - return listMap -} - // SanitizeUnicode will remove undesirable Unicode characters from a string. func SanitizeUnicode(s string) string { - return strings.Map(filterBlacklist, s) + return strings.Map(filterBlocklist, s) } -// filterBlacklist returns `r` if it is not in the blacklist, otherwise drop (-1). -// Blacklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist -func filterBlacklist(r rune) rune { +// filterBlocklist returns `r` if it is not in the blocklist, otherwise drop (-1). +// Blocklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist +func filterBlocklist(r rune) rune { const drop = -1 switch r { case '\u0340', '\u0341': // clones of grave and acute; deprecated in Unicode diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/v5/model/version.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/version.go index 2a27fb1a..3c8bf553 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go @@ -13,6 +13,29 @@ import ( // It should be maintained in chronological order with most current // release at the front of the list. 
var versions = []string{ + "6.7.2", + "6.7.1", + "6.7.0", + "6.6.0", + "6.5.0", + "6.4.0", + "6.3.0", + "6.2.0", + "6.1.0", + "6.0.0", + "5.39.0", + "5.38.0", + "5.37.0", + "5.36.0", + "5.35.0", + "5.34.0", + "5.33.0", + "5.32.0", + "5.31.0", + "5.30.0", + "5.29.0", + "5.28.0", + "5.27.0", "5.26.0", "5.25.0", "5.24.0", @@ -144,9 +167,8 @@ func IsCurrentVersion(versionToCheck string) bool { if toCheckMajor == currentMajor && toCheckMinor == currentMinor { return true - } else { - return false } + return false } func IsPreviousVersionsSupported(versionToCheck string) bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_client.go similarity index 79% rename from vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go rename to vendor/github.com/mattermost/mattermost-server/v6/model/websocket_client.go index 72ca4a8f..b80d4777 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_client.go @@ -6,16 +6,20 @@ package model import ( "bytes" "encoding/json" + "fmt" "net/http" "sync/atomic" "time" + "github.com/mattermost/mattermost-server/v6/shared/mlog" + "github.com/gorilla/websocket" + "github.com/vmihailenco/msgpack/v5" ) const ( - SOCKET_MAX_MESSAGE_SIZE_KB = 8 * 1024 // 8KB - PING_TIMEOUT_BUFFER_SECONDS = 5 + SocketMaxMessageSizeKb = 8 * 1024 // 8KB + PingTimeoutBufferSeconds = 5 ) type msgType int @@ -23,6 +27,7 @@ type msgType int const ( msgTypeJSON msgType = iota + 1 msgTypePong + msgTypeBinary ) type writeMessage struct { @@ -35,11 +40,11 @@ const avgReadMsgSizeBytes = 1024 // WebSocketClient stores the necessary information required to // communicate with a WebSocket endpoint. // A client must read from PingTimeoutChannel, EventChannel and ResponseChannel to prevent -// deadlocks from occuring in the program. +// deadlocks from occurring in the program. type WebSocketClient struct { - Url string // The location of the server like "ws://localhost:8065" - ApiUrl string // The API location of the server like "ws://localhost:8065/api/v3" - ConnectUrl string // The WebSocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket" + URL string // The location of the server like "ws://localhost:8065" + APIURL string // The API location of the server like "ws://localhost:8065/api/v3" + ConnectURL string // The WebSocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket" Conn *websocket.Conn // The WebSocket connection AuthToken string // The token used to open the WebSocket connection Sequence int64 // The ever-incrementing sequence attached to each WebSocket action @@ -59,22 +64,38 @@ type WebSocketClient struct { // NewWebSocketClient constructs a new WebSocket client with convenience // methods for talking to the server. 
-func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) {
+func NewWebSocketClient(url, authToken string) (*WebSocketClient, error) {
 	return NewWebSocketClientWithDialer(websocket.DefaultDialer, url, authToken)
 }
 
+func NewReliableWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken, connID string, seqNo int, withAuthHeader bool) (*WebSocketClient, error) {
+	connectURL := url + APIURLSuffix + "/websocket" + fmt.Sprintf("?connection_id=%s&sequence_number=%d", connID, seqNo)
+	var header http.Header
+	if withAuthHeader {
+		header = http.Header{
+			"Authorization": []string{"Bearer " + authToken},
+		}
+	}
+
+	return makeClient(dialer, url, connectURL, authToken, header)
+}
+
 // NewWebSocketClientWithDialer constructs a new WebSocket client with convenience
 // methods for talking to the server using a custom dialer.
-func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) {
-	conn, _, err := dialer.Dial(url+API_URL_SUFFIX+"/websocket", nil)
+func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, error) {
+	return makeClient(dialer, url, url+APIURLSuffix+"/websocket", authToken, nil)
+}
+
+func makeClient(dialer *websocket.Dialer, url, connectURL, authToken string, header http.Header) (*WebSocketClient, error) {
+	conn, _, err := dialer.Dial(connectURL, header)
 	if err != nil {
 		return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
 	}
 
 	client := &WebSocketClient{
-		Url:        url,
-		ApiUrl:     url + API_URL_SUFFIX,
-		ConnectUrl: url + API_URL_SUFFIX + "/websocket",
+		URL:        url,
+		APIURL:     url + APIURLSuffix,
+		ConnectURL: connectURL,
 		Conn:       conn,
 		AuthToken:  authToken,
 		Sequence:   1,
@@ -90,34 +111,34 @@ func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken strin
 	client.configurePingHandling()
 	go client.writer()
 
-	client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken})
+	client.SendMessage(WebsocketAuthenticationChallenge, map[string]interface{}{"token": authToken})
 
 	return client, nil
 }
 
 // NewWebSocketClient4 constructs a new WebSocket client with convenience
 // methods for talking to the server. Uses the v4 endpoint.
-func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) {
+func NewWebSocketClient4(url, authToken string) (*WebSocketClient, error) {
	return NewWebSocketClient4WithDialer(websocket.DefaultDialer, url, authToken)
 }
 
 // NewWebSocketClient4WithDialer constructs a new WebSocket client with convenience
 // methods for talking to the server using a custom dialer. Uses the v4 endpoint.
-func NewWebSocketClient4WithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) {
+func NewWebSocketClient4WithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, error) {
 	return NewWebSocketClientWithDialer(dialer, url, authToken)
 }
 
-// Connect creates a websocket connection with the given ConnectUrl.
+// Connect creates a websocket connection with the given ConnectURL.
 // This is racy and error-prone and should not be used. Use any of the New* functions to create a websocket.
 func (wsc *WebSocketClient) Connect() *AppError {
 	return wsc.ConnectWithDialer(websocket.DefaultDialer)
 }
 
-// ConnectWithDialer creates a websocket connection with the given ConnectUrl using the dialer.
+// ConnectWithDialer creates a websocket connection with the given ConnectURL using the dialer. // This is racy and error-prone and should not be used. Use any of the New* functions to create a websocket. func (wsc *WebSocketClient) ConnectWithDialer(dialer *websocket.Dialer) *AppError { var err error - wsc.Conn, _, err = dialer.Dial(wsc.ConnectUrl, nil) + wsc.Conn, _, err = dialer.Dial(wsc.ConnectURL, nil) if err != nil { return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) } @@ -136,7 +157,7 @@ func (wsc *WebSocketClient) ConnectWithDialer(dialer *websocket.Dialer) *AppErro wsc.EventChannel = make(chan *WebSocketEvent, 100) wsc.ResponseChannel = make(chan *WebSocketResponse, 100) - wsc.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": wsc.AuthToken}) + wsc.SendMessage(WebsocketAuthenticationChallenge, map[string]interface{}{"token": wsc.AuthToken}) return nil } @@ -163,6 +184,10 @@ func (wsc *WebSocketClient) writer() { switch msg.msgType { case msgTypeJSON: wsc.Conn.WriteJSON(msg.data) + case msgTypeBinary: + if data, ok := msg.data.([]byte); ok { + wsc.Conn.WriteMessage(websocket.BinaryMessage, data) + } case msgTypePong: wsc.Conn.WriteMessage(websocket.PongMessage, []byte{}) } @@ -224,8 +249,9 @@ func (wsc *WebSocketClient) Listen() { return } - event := WebSocketEventFromJson(bytes.NewReader(buf.Bytes())) - if event == nil { + event, jsonErr := WebSocketEventFromJSON(bytes.NewReader(buf.Bytes())) + if jsonErr != nil { + mlog.Warn("Failed to decode from JSON", mlog.Err(jsonErr)) continue } if event.IsValid() { @@ -255,6 +281,26 @@ func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface } } +func (wsc *WebSocketClient) SendBinaryMessage(action string, data map[string]interface{}) error { + req := &WebSocketRequest{} + req.Seq = wsc.Sequence + req.Action = action + req.Data = data + + binaryData, err := msgpack.Marshal(req) + if err != nil { + return fmt.Errorf("failed to marshal request to msgpack: %w", err) + } + + wsc.Sequence++ + wsc.writeChan <- writeMessage{ + msgType: msgTypeBinary, + data: binaryData, + } + + return nil +} + // UserTyping will push a user_typing event out to all connected users // who are in the specified channel func (wsc *WebSocketClient) UserTyping(channelId, parentId string) { @@ -282,7 +328,7 @@ func (wsc *WebSocketClient) GetStatusesByIds(userIds []string) { func (wsc *WebSocketClient) configurePingHandling() { wsc.Conn.SetPingHandler(wsc.pingHandler) - wsc.pingTimeoutTimer = time.NewTimer(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + wsc.pingTimeoutTimer = time.NewTimer(time.Second * (60 + PingTimeoutBufferSeconds)) go wsc.pingWatchdog() } @@ -309,11 +355,11 @@ func (wsc *WebSocketClient) pingWatchdog() { if !wsc.pingTimeoutTimer.Stop() { <-wsc.pingTimeoutTimer.C } - wsc.pingTimeoutTimer.Reset(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + wsc.pingTimeoutTimer.Reset(time.Second * (60 + PingTimeoutBufferSeconds)) case <-wsc.pingTimeoutTimer.C: wsc.PingTimeoutChannel <- true - wsc.pingTimeoutTimer.Reset(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + wsc.pingTimeoutTimer.Reset(time.Second * (60 + PingTimeoutBufferSeconds)) case <-wsc.quitPingWatchdog: return } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go new file mode 100644 index 00000000..38e42bb2 --- /dev/null +++ 
b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go @@ -0,0 +1,363 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" +) + +const ( + WebsocketEventTyping = "typing" + WebsocketEventPosted = "posted" + WebsocketEventPostEdited = "post_edited" + WebsocketEventPostDeleted = "post_deleted" + WebsocketEventPostUnread = "post_unread" + WebsocketEventChannelConverted = "channel_converted" + WebsocketEventChannelCreated = "channel_created" + WebsocketEventChannelDeleted = "channel_deleted" + WebsocketEventChannelRestored = "channel_restored" + WebsocketEventChannelUpdated = "channel_updated" + WebsocketEventChannelMemberUpdated = "channel_member_updated" + WebsocketEventChannelSchemeUpdated = "channel_scheme_updated" + WebsocketEventDirectAdded = "direct_added" + WebsocketEventGroupAdded = "group_added" + WebsocketEventNewUser = "new_user" + WebsocketEventAddedToTeam = "added_to_team" + WebsocketEventLeaveTeam = "leave_team" + WebsocketEventUpdateTeam = "update_team" + WebsocketEventDeleteTeam = "delete_team" + WebsocketEventRestoreTeam = "restore_team" + WebsocketEventUpdateTeamScheme = "update_team_scheme" + WebsocketEventUserAdded = "user_added" + WebsocketEventUserUpdated = "user_updated" + WebsocketEventUserRoleUpdated = "user_role_updated" + WebsocketEventMemberroleUpdated = "memberrole_updated" + WebsocketEventUserRemoved = "user_removed" + WebsocketEventPreferenceChanged = "preference_changed" + WebsocketEventPreferencesChanged = "preferences_changed" + WebsocketEventPreferencesDeleted = "preferences_deleted" + WebsocketEventEphemeralMessage = "ephemeral_message" + WebsocketEventStatusChange = "status_change" + WebsocketEventHello = "hello" + WebsocketAuthenticationChallenge = "authentication_challenge" + WebsocketEventReactionAdded = "reaction_added" + WebsocketEventReactionRemoved = "reaction_removed" + WebsocketEventResponse = "response" + WebsocketEventEmojiAdded = "emoji_added" + WebsocketEventChannelViewed = "channel_viewed" + WebsocketEventPluginStatusesChanged = "plugin_statuses_changed" + WebsocketEventPluginEnabled = "plugin_enabled" + WebsocketEventPluginDisabled = "plugin_disabled" + WebsocketEventRoleUpdated = "role_updated" + WebsocketEventLicenseChanged = "license_changed" + WebsocketEventConfigChanged = "config_changed" + WebsocketEventOpenDialog = "open_dialog" + WebsocketEventGuestsDeactivated = "guests_deactivated" + WebsocketEventUserActivationStatusChange = "user_activation_status_change" + WebsocketEventReceivedGroup = "received_group" + WebsocketEventReceivedGroupAssociatedToTeam = "received_group_associated_to_team" + WebsocketEventReceivedGroupNotAssociatedToTeam = "received_group_not_associated_to_team" + WebsocketEventReceivedGroupAssociatedToChannel = "received_group_associated_to_channel" + WebsocketEventReceivedGroupNotAssociatedToChannel = "received_group_not_associated_to_channel" + WebsocketEventGroupMemberDelete = "group_member_deleted" + WebsocketEventGroupMemberAdd = "group_member_add" + WebsocketEventSidebarCategoryCreated = "sidebar_category_created" + WebsocketEventSidebarCategoryUpdated = "sidebar_category_updated" + WebsocketEventSidebarCategoryDeleted = "sidebar_category_deleted" + WebsocketEventSidebarCategoryOrderUpdated = "sidebar_category_order_updated" + WebsocketWarnMetricStatusReceived = "warn_metric_status_received" + WebsocketWarnMetricStatusRemoved = "warn_metric_status_removed" + 
WebsocketEventCloudPaymentStatusUpdated = "cloud_payment_status_updated" + WebsocketEventThreadUpdated = "thread_updated" + WebsocketEventThreadFollowChanged = "thread_follow_changed" + WebsocketEventThreadReadChanged = "thread_read_changed" + WebsocketFirstAdminVisitMarketplaceStatusReceived = "first_admin_visit_marketplace_status_received" +) + +type WebSocketMessage interface { + ToJSON() ([]byte, error) + IsValid() bool + EventType() string +} + +type WebsocketBroadcast struct { + OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here + UserId string `json:"user_id"` // broadcast only occurs for this user + ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel + TeamId string `json:"team_id"` // broadcast only occurs for users in this team + ContainsSanitizedData bool `json:"-"` + ContainsSensitiveData bool `json:"-"` + // ReliableClusterSend indicates whether or not the message should + // be sent through the cluster using the reliable, TCP backed channel. + ReliableClusterSend bool `json:"-"` +} + +func (wb *WebsocketBroadcast) copy() *WebsocketBroadcast { + if wb == nil { + return nil + } + + var c WebsocketBroadcast + if wb.OmitUsers != nil { + c.OmitUsers = make(map[string]bool, len(wb.OmitUsers)) + for k, v := range wb.OmitUsers { + c.OmitUsers[k] = v + } + } + c.UserId = wb.UserId + c.ChannelId = wb.ChannelId + c.TeamId = wb.TeamId + c.ContainsSanitizedData = wb.ContainsSanitizedData + c.ContainsSensitiveData = wb.ContainsSensitiveData + + return &c +} + +type precomputedWebSocketEventJSON struct { + Event json.RawMessage + Data json.RawMessage + Broadcast json.RawMessage +} + +func (p *precomputedWebSocketEventJSON) copy() *precomputedWebSocketEventJSON { + if p == nil { + return nil + } + + var c precomputedWebSocketEventJSON + + if p.Event != nil { + c.Event = make([]byte, len(p.Event)) + copy(c.Event, p.Event) + } + + if p.Data != nil { + c.Data = make([]byte, len(p.Data)) + copy(c.Data, p.Data) + } + + if p.Broadcast != nil { + c.Broadcast = make([]byte, len(p.Broadcast)) + copy(c.Broadcast, p.Broadcast) + } + + return &c +} + +// webSocketEventJSON mirrors WebSocketEvent to make some of its unexported fields serializable +type webSocketEventJSON struct { + Event string `json:"event"` + Data map[string]interface{} `json:"data"` + Broadcast *WebsocketBroadcast `json:"broadcast"` + Sequence int64 `json:"seq"` +} + +type WebSocketEvent struct { + event string + data map[string]interface{} + broadcast *WebsocketBroadcast + sequence int64 + precomputedJSON *precomputedWebSocketEventJSON +} + +// PrecomputeJSON precomputes and stores the serialized JSON for all fields other than Sequence. +// This makes ToJSON much more efficient when sending the same event to multiple connections. 
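A sketch of the fan-out pattern that the PrecomputeJSON method defined just below enables: serialize the event body once, then stamp a per-connection sequence number cheaply. Only the seq field differs between payloads; the hello event and short loop stand in for a real connection registry:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	ev := model.NewWebSocketEvent(model.WebsocketEventHello, "", "", "user-id", nil)
	pre := ev.PrecomputeJSON() // event, data and broadcast serialized once

	for seq := int64(1); seq <= 3; seq++ {
		// SetSequence copies the event but keeps the cached JSON,
		// so ToJSON only has to splice in the new sequence number.
		buf, err := pre.SetSequence(seq).ToJSON()
		if err != nil {
			panic(err)
		}
		fmt.Println(string(buf))
	}
}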
+func (ev *WebSocketEvent) PrecomputeJSON() *WebSocketEvent { + copy := ev.Copy() + event, _ := json.Marshal(copy.event) + data, _ := json.Marshal(copy.data) + broadcast, _ := json.Marshal(copy.broadcast) + copy.precomputedJSON = &precomputedWebSocketEventJSON{ + Event: json.RawMessage(event), + Data: json.RawMessage(data), + Broadcast: json.RawMessage(broadcast), + } + return copy +} + +func (ev *WebSocketEvent) Add(key string, value interface{}) { + ev.data[key] = value +} + +func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent { + return &WebSocketEvent{ + event: event, + data: make(map[string]interface{}), + broadcast: &WebsocketBroadcast{ + TeamId: teamId, + ChannelId: channelId, + UserId: userId, + OmitUsers: omitUsers}, + } +} + +func (ev *WebSocketEvent) Copy() *WebSocketEvent { + copy := &WebSocketEvent{ + event: ev.event, + data: ev.data, + broadcast: ev.broadcast, + sequence: ev.sequence, + precomputedJSON: ev.precomputedJSON, + } + return copy +} + +func (ev *WebSocketEvent) DeepCopy() *WebSocketEvent { + var dataCopy map[string]interface{} + if ev.data != nil { + dataCopy = make(map[string]interface{}, len(ev.data)) + for k, v := range ev.data { + dataCopy[k] = v + } + } + + copy := &WebSocketEvent{ + event: ev.event, + data: dataCopy, + broadcast: ev.broadcast.copy(), + sequence: ev.sequence, + precomputedJSON: ev.precomputedJSON.copy(), + } + return copy +} + +func (ev *WebSocketEvent) GetData() map[string]interface{} { + return ev.data +} + +func (ev *WebSocketEvent) GetBroadcast() *WebsocketBroadcast { + return ev.broadcast +} + +func (ev *WebSocketEvent) GetSequence() int64 { + return ev.sequence +} + +func (ev *WebSocketEvent) SetEvent(event string) *WebSocketEvent { + copy := ev.Copy() + copy.event = event + return copy +} + +func (ev *WebSocketEvent) SetData(data map[string]interface{}) *WebSocketEvent { + copy := ev.Copy() + copy.data = data + return copy +} + +func (ev *WebSocketEvent) SetBroadcast(broadcast *WebsocketBroadcast) *WebSocketEvent { + copy := ev.Copy() + copy.broadcast = broadcast + return copy +} + +func (ev *WebSocketEvent) SetSequence(seq int64) *WebSocketEvent { + copy := ev.Copy() + copy.sequence = seq + return copy +} + +func (ev *WebSocketEvent) IsValid() bool { + return ev.event != "" +} + +func (ev *WebSocketEvent) EventType() string { + return ev.event +} + +func (ev *WebSocketEvent) ToJSON() ([]byte, error) { + if ev.precomputedJSON != nil { + return []byte(fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, ev.precomputedJSON.Event, ev.precomputedJSON.Data, ev.precomputedJSON.Broadcast, ev.GetSequence())), nil + } + return json.Marshal(webSocketEventJSON{ + ev.event, + ev.data, + ev.broadcast, + ev.sequence, + }) +} + +// Encode encodes the event to the given encoder. 
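ToJSON above and WebSocketEventFromJSON below form a round trip, which is what the client's Listen loop relies on when decoding incoming frames. A small sketch with placeholder IDs and payload:

package main

import (
	"bytes"
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	ev := model.NewWebSocketEvent(model.WebsocketEventPosted, "", "channel-id", "", nil)
	ev.Add("message", "hello") // any JSON-marshalable payload

	buf, err := ev.ToJSON()
	if err != nil {
		panic(err)
	}

	// The inverse direction, as used when reading frames off the wire.
	decoded, err := model.WebSocketEventFromJSON(bytes.NewReader(buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.EventType(), decoded.GetData()["message"])
}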
+func (ev *WebSocketEvent) Encode(enc *json.Encoder) error { + if ev.precomputedJSON != nil { + return enc.Encode(json.RawMessage( + fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, ev.precomputedJSON.Event, ev.precomputedJSON.Data, ev.precomputedJSON.Broadcast, ev.sequence), + )) + } + + return enc.Encode(webSocketEventJSON{ + ev.event, + ev.data, + ev.broadcast, + ev.sequence, + }) +} + +func WebSocketEventFromJSON(data io.Reader) (*WebSocketEvent, error) { + var ev WebSocketEvent + var o webSocketEventJSON + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil, err + } + ev.event = o.Event + if u, ok := o.Data["user"]; ok { + // We need to convert to and from JSON again + // because the user is in the form of a map[string]interface{}. + buf, err := json.Marshal(u) + if err != nil { + return nil, err + } + + var user User + if err = json.Unmarshal(buf, &user); err != nil { + return nil, err + } + o.Data["user"] = &user + } + ev.data = o.Data + ev.broadcast = o.Broadcast + ev.sequence = o.Sequence + return &ev, nil +} + +// WebSocketResponse represents a response received through the WebSocket +// for a request made to the server. This is available through the ResponseChannel +// channel in WebSocketClient. +type WebSocketResponse struct { + Status string `json:"status"` // The status of the response. For example: OK, FAIL. + SeqReply int64 `json:"seq_reply,omitempty"` // A counter which is incremented for every response sent. + Data map[string]interface{} `json:"data,omitempty"` // The data contained in the response. + Error *AppError `json:"error,omitempty"` // A field that is set if any error has occurred. +} + +func (m *WebSocketResponse) Add(key string, value interface{}) { + m.Data[key] = value +} + +func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse { + return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data} +} + +func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse { + return &WebSocketResponse{Status: StatusFail, SeqReply: seqReply, Error: err} +} + +func (m *WebSocketResponse) IsValid() bool { + return m.Status != "" +} + +func (m *WebSocketResponse) EventType() string { + return WebsocketEventResponse +} + +func (m *WebSocketResponse) ToJSON() ([]byte, error) { + return json.Marshal(m) +} + +func WebSocketResponseFromJSON(data io.Reader) (*WebSocketResponse, error) { + var o *WebSocketResponse + return o, json.NewDecoder(data).Decode(&o) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_request.go b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_request.go new file mode 100644 index 00000000..a7750bce --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_request.go @@ -0,0 +1,36 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "github.com/mattermost/mattermost-server/v6/shared/i18n" + + "github.com/vmihailenco/msgpack/v5" +) + +// WebSocketRequest represents a request made to the server through a websocket. +type WebSocketRequest struct { + // Client-provided fields + Seq int64 `json:"seq" msgpack:"seq"` // A counter which is incremented for every request made. + Action string `json:"action" msgpack:"action"` // The action to perform for a request. For example: get_statuses, user_typing. 
+ Data map[string]interface{} `json:"data" msgpack:"data"` // The metadata for an action. + + // Server-provided fields + Session Session `json:"-" msgpack:"-"` + T i18n.TranslateFunc `json:"-" msgpack:"-"` + Locale string `json:"-" msgpack:"-"` +} + +func (o *WebSocketRequest) Clone() (*WebSocketRequest, error) { + buf, err := msgpack.Marshal(o) + if err != nil { + return nil, err + } + var ret WebSocketRequest + err = msgpack.Unmarshal(buf, &ret) + if err != nil { + return nil, err + } + return &ret, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/default.go b/vendor/github.com/mattermost/mattermost-server/v6/services/timezones/default.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/services/timezones/default.go rename to vendor/github.com/mattermost/mattermost-server/v6/services/timezones/default.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/timezones.go b/vendor/github.com/mattermost/mattermost-server/v6/services/timezones/timezones.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/services/timezones/timezones.go rename to vendor/github.com/mattermost/mattermost-server/v6/services/timezones/timezones.go diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/filesstore.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/filesstore.go new file mode 100644 index 00000000..c17ea1f6 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/filesstore.go @@ -0,0 +1,84 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package filestore + +import ( + "io" + "time" + + "github.com/pkg/errors" +) + +const ( + driverS3 = "amazons3" + driverLocal = "local" +) + +type ReadCloseSeeker interface { + io.ReadCloser + io.Seeker +} + +type FileBackend interface { + TestConnection() error + + Reader(path string) (ReadCloseSeeker, error) + ReadFile(path string) ([]byte, error) + FileExists(path string) (bool, error) + FileSize(path string) (int64, error) + CopyFile(oldPath, newPath string) error + MoveFile(oldPath, newPath string) error + WriteFile(fr io.Reader, path string) (int64, error) + AppendFile(fr io.Reader, path string) (int64, error) + RemoveFile(path string) error + FileModTime(path string) (time.Time, error) + + ListDirectory(path string) ([]string, error) + ListDirectoryRecursively(path string) ([]string, error) + RemoveDirectory(path string) error +} + +type FileBackendSettings struct { + DriverName string + Directory string + AmazonS3AccessKeyId string + AmazonS3SecretAccessKey string + AmazonS3Bucket string + AmazonS3PathPrefix string + AmazonS3Region string + AmazonS3Endpoint string + AmazonS3SSL bool + AmazonS3SignV2 bool + AmazonS3SSE bool + AmazonS3Trace bool +} + +func (settings *FileBackendSettings) CheckMandatoryS3Fields() error { + if settings.AmazonS3Bucket == "" { + return errors.New("missing s3 bucket settings") + } + + // If the S3 endpoint is not set, fall back to the default AWS endpoint. + if settings.AmazonS3Endpoint == "" { + settings.AmazonS3Endpoint = "s3.amazonaws.com" + } + + return nil +} + +func NewFileBackend(settings FileBackendSettings) (FileBackend, error) { + switch settings.DriverName { + case driverS3: + backend, err := NewS3FileBackend(settings) + if err != nil { + return nil, errors.Wrap(err, "unable to connect to the s3 backend") + } + return backend, nil + case
driverLocal: + return &LocalFileBackend{ + directory: settings.Directory, + }, nil + } + return nil, errors.New("no valid filestorage driver found") +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/localstore.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/localstore.go new file mode 100644 index 00000000..e2d53e49 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/localstore.go @@ -0,0 +1,240 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package filestore + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v6/shared/mlog" +) + +const ( + TestFilePath = "/testfile" +) + +type LocalFileBackend struct { + directory string +} + +// copyFile will copy a file from src path to dst path. +// Overwrites any existing files at dst. +// Permissions are copied from file at src to the new file at dst. +func copyFile(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + if err = os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil { + return + } + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + if e := out.Close(); e != nil { + err = e + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return + } + + err = out.Sync() + if err != nil { + return + } + + stat, err := os.Stat(src) + if err != nil { + return + } + err = os.Chmod(dst, stat.Mode()) + if err != nil { + return + } + + return +} + +func (b *LocalFileBackend) TestConnection() error { + f := bytes.NewReader([]byte("testingwrite")) + if _, err := writeFileLocally(f, filepath.Join(b.directory, TestFilePath)); err != nil { + return errors.Wrap(err, "unable to write to the local filesystem storage") + } + os.Remove(filepath.Join(b.directory, TestFilePath)) + mlog.Debug("Able to write files to local storage.") + return nil +} + +func (b *LocalFileBackend) Reader(path string) (ReadCloseSeeker, error) { + f, err := os.Open(filepath.Join(b.directory, path)) + if err != nil { + return nil, errors.Wrapf(err, "unable to open file %s", path) + } + return f, nil +} + +func (b *LocalFileBackend) ReadFile(path string) ([]byte, error) { + f, err := ioutil.ReadFile(filepath.Join(b.directory, path)) + if err != nil { + return nil, errors.Wrapf(err, "unable to read file %s", path) + } + return f, nil +} + +func (b *LocalFileBackend) FileExists(path string) (bool, error) { + _, err := os.Stat(filepath.Join(b.directory, path)) + + if os.IsNotExist(err) { + return false, nil + } + + if err != nil { + return false, errors.Wrapf(err, "unable to know if file %s exists", path) + } + return true, nil +} + +func (b *LocalFileBackend) FileSize(path string) (int64, error) { + info, err := os.Stat(filepath.Join(b.directory, path)) + if err != nil { + return 0, errors.Wrapf(err, "unable to get file size for %s", path) + } + return info.Size(), nil +} + +func (b *LocalFileBackend) FileModTime(path string) (time.Time, error) { + info, err := os.Stat(filepath.Join(b.directory, path)) + if err != nil { + return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path) + } + return info.ModTime(), nil +} + +func (b *LocalFileBackend) CopyFile(oldPath, newPath string) error { + if err := copyFile(filepath.Join(b.directory, oldPath), filepath.Join(b.directory, newPath)); err != nil { + return 
errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath) + } + return nil +} + +func (b *LocalFileBackend) MoveFile(oldPath, newPath string) error { + if err := os.MkdirAll(filepath.Dir(filepath.Join(b.directory, newPath)), 0750); err != nil { + return errors.Wrapf(err, "unable to create the new destination directory %s", filepath.Dir(newPath)) + } + + if err := os.Rename(filepath.Join(b.directory, oldPath), filepath.Join(b.directory, newPath)); err != nil { + return errors.Wrapf(err, "unable to move the file to %s to the destination directory", newPath) + } + + return nil +} + +func (b *LocalFileBackend) WriteFile(fr io.Reader, path string) (int64, error) { + return writeFileLocally(fr, filepath.Join(b.directory, path)) +} + +func writeFileLocally(fr io.Reader, path string) (int64, error) { + if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { + directory, _ := filepath.Abs(filepath.Dir(path)) + return 0, errors.Wrapf(err, "unable to create the directory %s for the file %s", directory, path) + } + fw, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return 0, errors.Wrapf(err, "unable to open the file %s to write the data", path) + } + defer fw.Close() + written, err := io.Copy(fw, fr) + if err != nil { + return written, errors.Wrapf(err, "unable write the data in the file %s", path) + } + return written, nil +} + +func (b *LocalFileBackend) AppendFile(fr io.Reader, path string) (int64, error) { + fp := filepath.Join(b.directory, path) + if _, err := os.Stat(fp); err != nil { + return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path) + } + fw, err := os.OpenFile(fp, os.O_WRONLY|os.O_APPEND, 0600) + if err != nil { + return 0, errors.Wrapf(err, "unable to open the file %s to append the data", path) + } + defer fw.Close() + written, err := io.Copy(fw, fr) + if err != nil { + return written, errors.Wrapf(err, "unable append the data in the file %s", path) + } + return written, nil +} + +func (b *LocalFileBackend) RemoveFile(path string) error { + if err := os.Remove(filepath.Join(b.directory, path)); err != nil { + return errors.Wrapf(err, "unable to remove the file %s", path) + } + return nil +} + +// basePath: path to get to the file but won't be added to the end result +// path: basePath+path current directory we are looking at +// maxDepth: parameter to prevent infinite recursion, once this is reached we won't look any further +func appendRecursively(basePath, path string, maxDepth int) ([]string, error) { + results := []string{} + dirEntries, err := os.ReadDir(filepath.Join(basePath, path)) + if err != nil { + if os.IsNotExist(err) { + return results, nil + } + return results, errors.Wrapf(err, "unable to list the directory %s", path) + } + for _, dirEntry := range dirEntries { + entryName := dirEntry.Name() + entryPath := filepath.Join(path, entryName) + if entryName == "." || entryName == ".." || entryPath == path { + continue + } + if dirEntry.IsDir() { + if maxDepth <= 0 { + mlog.Warn("Max Depth reached", mlog.String("path", entryPath)) + results = append(results, entryPath) + continue // we'll ignore it if max depth is reached. + } + nestedResults, err := appendRecursively(basePath, entryPath, maxDepth-1) + if err != nil { + return results, err + } + results = append(results, nestedResults...) 
+ } else { + results = append(results, entryPath) + } + } + return results, nil +} + +func (b *LocalFileBackend) ListDirectory(path string) ([]string, error) { + return appendRecursively(b.directory, path, 0) +} + +func (b *LocalFileBackend) ListDirectoryRecursively(path string) ([]string, error) { + return appendRecursively(b.directory, path, 10) +} + +func (b *LocalFileBackend) RemoveDirectory(path string) error { + if err := os.RemoveAll(filepath.Join(b.directory, path)); err != nil { + return errors.Wrapf(err, "unable to remove the directory %s", path) + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3_overrides.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3_overrides.go new file mode 100644 index 00000000..697809ee --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3_overrides.go @@ -0,0 +1,56 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package filestore + +import ( + "context" + "net/http" + + "github.com/minio/minio-go/v7/pkg/credentials" +) + +// customTransport is used to point the request to a different server. +// This is helpful in situations where a different service is handling AWS S3 requests +// from multiple Mattermost applications, and the Mattermost service itself does not +// have any S3 credentials. +type customTransport struct { + base http.RoundTripper + host string + scheme string + client http.Client +} + +// RoundTrip implements the http.RoundTripper interface. +func (t *customTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // RoundTrippers should not modify the original request, so the rewrite is applied to the clone. + newReq := req.Clone(context.Background()) + *newReq.URL = *req.URL + newReq.URL.Scheme = t.scheme + newReq.URL.Host = t.host + return t.client.Do(newReq) +} + +// customProvider is a dummy credentials provider for the minio client to work +// without actually providing credentials. This is needed with a custom transport +// in cases where the minio client does not actually have credentials with itself, +// rather needs responses from another entity. +// +// It satisfies the credentials.Provider interface. +type customProvider struct { + isSignV2 bool +} + +// Retrieve just returns empty credentials. +func (cp customProvider) Retrieve() (credentials.Value, error) { + sign := credentials.SignatureV4 + if cp.isSignV2 { + sign = credentials.SignatureV2 + } + return credentials.Value{ + SignerType: sign, + }, nil +} + +// IsExpired always returns false. +func (cp customProvider) IsExpired() bool { return false } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go new file mode 100644 index 00000000..d162daa2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go @@ -0,0 +1,463 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package filestore + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + s3 "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v6/shared/mlog" +) + +// S3FileBackend contains all necessary information to communicate with +// an AWS S3 compatible API backend.
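Both the local backend above and the S3 backend that follows sit behind the FileBackend interface from filesstore.go. A local-driver usage sketch, with an illustrative directory and paths:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v6/shared/filestore"
)

func main() {
	// "local" selects LocalFileBackend; only DriverName and Directory are used.
	backend, err := filestore.NewFileBackend(filestore.FileBackendSettings{
		DriverName: "local",
		Directory:  "/tmp/filestore-demo", // illustrative root
	})
	if err != nil {
		panic(err)
	}

	if _, err := backend.WriteFile(strings.NewReader("hello"), "notes/a.txt"); err != nil {
		panic(err)
	}
	data, err := backend.ReadFile("notes/a.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}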
+type S3FileBackend struct { + endpoint string + accessKey string + secretKey string + secure bool + signV2 bool + region string + bucket string + pathPrefix string + encrypt bool + trace bool + client *s3.Client +} + +type S3FileBackendAuthError struct { + DetailedError string +} + +// S3FileBackendNoBucketError is returned when testing a connection and no S3 bucket is found +type S3FileBackendNoBucketError struct{} + +const ( + // This is not exported by minio. See: https://github.com/minio/minio-go/issues/1339 + bucketNotFound = "NoSuchBucket" +) + +var ( + imageExtensions = map[string]bool{".jpg": true, ".jpeg": true, ".gif": true, ".bmp": true, ".png": true, ".tiff": true, "tif": true} + imageMimeTypes = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tif"} +) + +func isFileExtImage(ext string) bool { + ext = strings.ToLower(ext) + return imageExtensions[ext] +} + +func getImageMimeType(ext string) string { + ext = strings.ToLower(ext) + if imageMimeTypes[ext] == "" { + return "image" + } + return imageMimeTypes[ext] +} + +func (s *S3FileBackendAuthError) Error() string { + return s.DetailedError +} + +func (s *S3FileBackendNoBucketError) Error() string { + return "no such bucket" +} + +// NewS3FileBackend returns an instance of an S3FileBackend. +func NewS3FileBackend(settings FileBackendSettings) (*S3FileBackend, error) { + backend := &S3FileBackend{ + endpoint: settings.AmazonS3Endpoint, + accessKey: settings.AmazonS3AccessKeyId, + secretKey: settings.AmazonS3SecretAccessKey, + secure: settings.AmazonS3SSL, + signV2: settings.AmazonS3SignV2, + region: settings.AmazonS3Region, + bucket: settings.AmazonS3Bucket, + pathPrefix: settings.AmazonS3PathPrefix, + encrypt: settings.AmazonS3SSE, + trace: settings.AmazonS3Trace, + } + cli, err := backend.s3New() + if err != nil { + return nil, err + } + backend.client = cli + return backend, nil +} + +// Similar to s3.New() but allows initialization of signature v2 or signature v4 client. +// If signV2 input is false, function always returns signature v4. +// +// Additionally this function also takes a user defined region, if set +// disables automatic region lookup. +func (b *S3FileBackend) s3New() (*s3.Client, error) { + var creds *credentials.Credentials + + isCloud := os.Getenv("MM_CLOUD_FILESTORE_BIFROST") != "" + if isCloud { + creds = credentials.New(customProvider{isSignV2: b.signV2}) + } else if b.accessKey == "" && b.secretKey == "" { + creds = credentials.NewIAM("") + } else if b.signV2 { + creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV2) + } else { + creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV4) + } + + opts := s3.Options{ + Creds: creds, + Secure: b.secure, + Region: b.region, + } + + // If this is a cloud installation, we override the default transport. 
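For the common non-cloud branch of s3New above (static credentials, signature v4), a hypothetical settings block targeting a local MinIO; every value here is illustrative, not a default of this package:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/shared/filestore"
)

func main() {
	// With both keys set and SignV2 false, s3New takes the
	// credentials.NewStatic + SignatureV4 path.
	backend, err := filestore.NewFileBackend(filestore.FileBackendSettings{
		DriverName:              "amazons3",
		AmazonS3AccessKeyId:     "minioadmin",
		AmazonS3SecretAccessKey: "minioadmin",
		AmazonS3Bucket:          "mattermost-test",
		AmazonS3Region:          "us-east-1",
		AmazonS3Endpoint:        "localhost:9000",
		AmazonS3SSL:             false,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(backend.TestConnection())
}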
+ if isCloud { + tr, err := s3.DefaultTransport(b.secure) + if err != nil { + return nil, err + } + scheme := "http" + if b.secure { + scheme = "https" + } + opts.Transport = &customTransport{ + base: tr, + host: b.endpoint, + scheme: scheme, + } + } + + s3Clnt, err := s3.New(b.endpoint, &opts) + if err != nil { + return nil, err + } + + if b.trace { + s3Clnt.TraceOn(os.Stdout) + } + + return s3Clnt, nil +} + +func (b *S3FileBackend) TestConnection() error { + exists := true + var err error + // If a path prefix is present, we attempt to test the bucket by listing objects under the path + // and just checking the first response. This is because the BucketExists call is only at a bucket level + // and sometimes the user might only be allowed access to the specified path prefix. + if b.pathPrefix != "" { + obj := <-b.client.ListObjects(context.Background(), b.bucket, s3.ListObjectsOptions{Prefix: b.pathPrefix}) + if obj.Err != nil { + typedErr := s3.ToErrorResponse(obj.Err) + if typedErr.Code != bucketNotFound { + return &S3FileBackendAuthError{DetailedError: "unable to list objects in the S3 bucket"} + } + exists = false + } + } else { + exists, err = b.client.BucketExists(context.Background(), b.bucket) + if err != nil { + return &S3FileBackendAuthError{DetailedError: "unable to check if the S3 bucket exists"} + } + } + + if !exists { + return &S3FileBackendNoBucketError{} + } + mlog.Debug("Connection to S3 or minio is good. Bucket exists.") + return nil +} + +func (b *S3FileBackend) MakeBucket() error { + err := b.client.MakeBucket(context.Background(), b.bucket, s3.MakeBucketOptions{Region: b.region}) + if err != nil { + return errors.Wrap(err, "unable to create the s3 bucket") + } + return nil +} + +// Caller must close the first return value +func (b *S3FileBackend) Reader(path string) (ReadCloseSeeker, error) { + path = filepath.Join(b.pathPrefix, path) + minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to open file %s", path) + } + + return minioObject, nil +} + +func (b *S3FileBackend) ReadFile(path string) ([]byte, error) { + path = filepath.Join(b.pathPrefix, path) + minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to open file %s", path) + } + + defer minioObject.Close() + f, err := ioutil.ReadAll(minioObject) + if err != nil { + return nil, errors.Wrapf(err, "unable to read file %s", path) + } + return f, nil +} + +func (b *S3FileBackend) FileExists(path string) (bool, error) { + path = filepath.Join(b.pathPrefix, path) + + _, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{}) + if err == nil { + return true, nil + } + + var s3Err s3.ErrorResponse + if errors.As(err, &s3Err); s3Err.Code == "NoSuchKey" { + return false, nil + } + + return false, errors.Wrapf(err, "unable to know if file %s exists", path) +} + +func (b *S3FileBackend) FileSize(path string) (int64, error) { + path = filepath.Join(b.pathPrefix, path) + + info, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{}) + if err != nil { + return 0, errors.Wrapf(err, "unable to get file size for %s", path) + } + + return info.Size, nil +} + +func (b *S3FileBackend) FileModTime(path string) (time.Time, error) { + path = filepath.Join(b.pathPrefix, path) + + info, err := b.client.StatObject(context.Background(), b.bucket, path, 
s3.StatObjectOptions{}) + if err != nil { + return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path) + } + + return info.LastModified, nil +} + +func (b *S3FileBackend) CopyFile(oldPath, newPath string) error { + oldPath = filepath.Join(b.pathPrefix, oldPath) + newPath = filepath.Join(b.pathPrefix, newPath) + srcOpts := s3.CopySrcOptions{ + Bucket: b.bucket, + Object: oldPath, + } + if b.encrypt { + srcOpts.Encryption = encrypt.NewSSE() + } + + dstOpts := s3.CopyDestOptions{ + Bucket: b.bucket, + Object: newPath, + } + if b.encrypt { + dstOpts.Encryption = encrypt.NewSSE() + } + + if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath) + } + + return nil +} + +func (b *S3FileBackend) MoveFile(oldPath, newPath string) error { + oldPath = filepath.Join(b.pathPrefix, oldPath) + newPath = filepath.Join(b.pathPrefix, newPath) + srcOpts := s3.CopySrcOptions{ + Bucket: b.bucket, + Object: oldPath, + } + if b.encrypt { + srcOpts.Encryption = encrypt.NewSSE() + } + + dstOpts := s3.CopyDestOptions{ + Bucket: b.bucket, + Object: newPath, + } + if b.encrypt { + dstOpts.Encryption = encrypt.NewSSE() + } + + if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + return errors.Wrapf(err, "unable to copy the file to %s to the new destination", newPath) + } + + if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil { + return errors.Wrapf(err, "unable to remove the file old file %s", oldPath) + } + + return nil +} + +func (b *S3FileBackend) WriteFile(fr io.Reader, path string) (int64, error) { + var contentType string + path = filepath.Join(b.pathPrefix, path) + if ext := filepath.Ext(path); isFileExtImage(ext) { + contentType = getImageMimeType(ext) + } else { + contentType = "binary/octet-stream" + } + + options := s3PutOptions(b.encrypt, contentType) + info, err := b.client.PutObject(context.Background(), b.bucket, path, fr, -1, options) + if err != nil { + return info.Size, errors.Wrapf(err, "unable write the data in the file %s", path) + } + + return info.Size, nil +} + +func (b *S3FileBackend) AppendFile(fr io.Reader, path string) (int64, error) { + fp := filepath.Join(b.pathPrefix, path) + if _, err := b.client.StatObject(context.Background(), b.bucket, fp, s3.StatObjectOptions{}); err != nil { + return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path) + } + + var contentType string + if ext := filepath.Ext(fp); isFileExtImage(ext) { + contentType = getImageMimeType(ext) + } else { + contentType = "binary/octet-stream" + } + + options := s3PutOptions(b.encrypt, contentType) + sse := options.ServerSideEncryption + partName := fp + ".part" + info, err := b.client.PutObject(context.Background(), b.bucket, partName, fr, -1, options) + defer b.client.RemoveObject(context.Background(), b.bucket, partName, s3.RemoveObjectOptions{}) + if info.Size > 0 { + src1Opts := s3.CopySrcOptions{ + Bucket: b.bucket, + Object: fp, + } + src2Opts := s3.CopySrcOptions{ + Bucket: b.bucket, + Object: partName, + } + dstOpts := s3.CopyDestOptions{ + Bucket: b.bucket, + Object: fp, + Encryption: sse, + } + _, err = b.client.ComposeObject(context.Background(), dstOpts, src1Opts, src2Opts) + if err != nil { + return 0, errors.Wrapf(err, "unable append the data in the file %s", path) + } + return info.Size, nil + } + + return 0, errors.Wrapf(err, "unable append the data 
in the file %s", path) +} + +func (b *S3FileBackend) RemoveFile(path string) error { + path = filepath.Join(b.pathPrefix, path) + if err := b.client.RemoveObject(context.Background(), b.bucket, path, s3.RemoveObjectOptions{}); err != nil { + return errors.Wrapf(err, "unable to remove the file %s", path) + } + + return nil +} + +func getPathsFromObjectInfos(in <-chan s3.ObjectInfo) <-chan s3.ObjectInfo { + out := make(chan s3.ObjectInfo, 1) + + go func() { + defer close(out) + + for { + info, done := <-in + + if !done { + break + } + + out <- info + } + }() + + return out +} + +func (b *S3FileBackend) listDirectory(path string, recursion bool) ([]string, error) { + path = filepath.Join(b.pathPrefix, path) + if !strings.HasSuffix(path, "/") && path != "" { + // s3Clnt returns only the path itself when "/" is not present + // appending "/" to make it consistent across all filestores + path = path + "/" + } + + opts := s3.ListObjectsOptions{ + Prefix: path, + Recursive: recursion, + } + var paths []string + for object := range b.client.ListObjects(context.Background(), b.bucket, opts) { + if object.Err != nil { + return nil, errors.Wrapf(object.Err, "unable to list the directory %s", path) + } + // We strip the path prefix that gets applied, + // so that it remains transparent to the application. + object.Key = strings.TrimPrefix(object.Key, b.pathPrefix) + trimmed := strings.Trim(object.Key, "/") + if trimmed != "" { + paths = append(paths, trimmed) + } + } + + return paths, nil +} + +func (b *S3FileBackend) ListDirectory(path string) ([]string, error) { + return b.listDirectory(path, false) +} + +func (b *S3FileBackend) ListDirectoryRecursively(path string) ([]string, error) { + return b.listDirectory(path, true) +} + +func (b *S3FileBackend) RemoveDirectory(path string) error { + opts := s3.ListObjectsOptions{ + Prefix: filepath.Join(b.pathPrefix, path), + Recursive: true, + } + list := b.client.ListObjects(context.Background(), b.bucket, opts) + objectsCh := b.client.RemoveObjects(context.Background(), b.bucket, getPathsFromObjectInfos(list), s3.RemoveObjectsOptions{}) + for err := range objectsCh { + if err.Err != nil { + return errors.Wrapf(err.Err, "unable to remove the directory %s", path) + } + } + + return nil +} + +func s3PutOptions(encrypted bool, contentType string) s3.PutObjectOptions { + options := s3.PutObjectOptions{} + if encrypted { + options.ServerSideEncryption = encrypt.NewSSE() + } + options.ContentType = contentType + // We set the part size to the minimum allowed value of 5MBs + // to avoid an excessive allocation in minio.PutObject implementation. + options.PartSize = 1024 * 1024 * 5 + + return options +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/i18n/i18n.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/i18n/i18n.go new file mode 100644 index 00000000..0168b751 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/i18n/i18n.go @@ -0,0 +1,185 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package i18n + +import ( + "fmt" + "html/template" + "io/ioutil" + "net/http" + "path/filepath" + "reflect" + "strings" + + "github.com/mattermost/go-i18n/i18n" + + "github.com/mattermost/mattermost-server/v6/shared/mlog" +) + +const defaultLocale = "en" + +// TranslateFunc is the type of the translate functions +type TranslateFunc func(translationID string, args ...interface{}) string + +// T is the translate function using the default server language as fallback language +var T TranslateFunc + +// TDefault is the translate function using english as fallback language +var TDefault TranslateFunc + +var locales map[string]string = make(map[string]string) +var defaultServerLocale string +var defaultClientLocale string + +// TranslationsPreInit loads translations from filesystem if they are not +// loaded already and assigns english while loading server config +func TranslationsPreInit(translationsDir string) error { + if T != nil { + return nil + } + + // Set T even if we fail to load the translations. Lots of shutdown handling code will + // segfault trying to handle the error, and the untranslated IDs are strictly better. + T = tfuncWithFallback(defaultLocale) + TDefault = tfuncWithFallback(defaultLocale) + + return initTranslationsWithDir(translationsDir) +} + +// InitTranslations set the defaults configured in the server and initialize +// the T function using the server default as fallback language +func InitTranslations(serverLocale, clientLocale string) error { + defaultServerLocale = serverLocale + defaultClientLocale = clientLocale + + var err error + T, err = getTranslationsBySystemLocale() + return err +} + +func initTranslationsWithDir(dir string) error { + files, _ := ioutil.ReadDir(dir) + for _, f := range files { + if filepath.Ext(f.Name()) == ".json" { + filename := f.Name() + locales[strings.Split(filename, ".")[0]] = filepath.Join(dir, filename) + + if err := i18n.LoadTranslationFile(filepath.Join(dir, filename)); err != nil { + return err + } + } + } + + return nil +} + +func getTranslationsBySystemLocale() (TranslateFunc, error) { + locale := defaultServerLocale + if _, ok := locales[locale]; !ok { + mlog.Warn("Failed to load system translations for", mlog.String("locale", locale), mlog.String("attempting to fall back to default locale", defaultLocale)) + locale = defaultLocale + } + + if locales[locale] == "" { + return nil, fmt.Errorf("failed to load system translations for '%v'", defaultLocale) + } + + translations := tfuncWithFallback(locale) + if translations == nil { + return nil, fmt.Errorf("failed to load system translations") + } + + mlog.Info("Loaded system translations", mlog.String("for locale", locale), mlog.String("from locale", locales[locale])) + return translations, nil +} + +// GetUserTranslations get the translation function for an specific locale +func GetUserTranslations(locale string) TranslateFunc { + if _, ok := locales[locale]; !ok { + locale = defaultLocale + } + + translations := tfuncWithFallback(locale) + return translations +} + +// GetTranslationsAndLocaleFromRequest return the translation function and the +// locale based on a request headers +func GetTranslationsAndLocaleFromRequest(r *http.Request) (TranslateFunc, string) { + // This is for checking against locales like pt_BR or zn_CN + headerLocaleFull := strings.Split(r.Header.Get("Accept-Language"), ",")[0] + // This is for checking against locales like en, es + headerLocale := strings.Split(strings.Split(r.Header.Get("Accept-Language"), ",")[0], "-")[0] + defaultLocale := 
defaultClientLocale + if locales[headerLocaleFull] != "" { + translations := tfuncWithFallback(headerLocaleFull) + return translations, headerLocaleFull + } else if locales[headerLocale] != "" { + translations := tfuncWithFallback(headerLocale) + return translations, headerLocale + } else if locales[defaultLocale] != "" { + translations := tfuncWithFallback(defaultLocale) + return translations, headerLocale + } + + translations := tfuncWithFallback(defaultLocale) + return translations, defaultLocale +} + +// GetSupportedLocales return a map of locale code and the file path with the +// translations +func GetSupportedLocales() map[string]string { + return locales +} + +func tfuncWithFallback(pref string) TranslateFunc { + t, _ := i18n.Tfunc(pref) + return func(translationID string, args ...interface{}) string { + if translated := t(translationID, args...); translated != translationID { + return translated + } + + t, _ := i18n.Tfunc(defaultLocale) + return t(translationID, args...) + } +} + +// TranslateAsHTML translates the translationID provided and return a +// template.HTML object +func TranslateAsHTML(t TranslateFunc, translationID string, args map[string]interface{}) template.HTML { + message := t(translationID, escapeForHTML(args)) + message = strings.Replace(message, "[[", "", -1) + message = strings.Replace(message, "]]", "", -1) + return template.HTML(message) +} + +func escapeForHTML(arg interface{}) interface{} { + switch typedArg := arg.(type) { + case string: + return template.HTMLEscapeString(typedArg) + case *string: + return template.HTMLEscapeString(*typedArg) + case map[string]interface{}: + safeArg := make(map[string]interface{}, len(typedArg)) + for key, value := range typedArg { + safeArg[key] = escapeForHTML(value) + } + return safeArg + default: + mlog.Warn( + "Unable to escape value for HTML template", + mlog.Any("html_template", arg), + mlog.String("template_type", reflect.ValueOf(arg).Type().String()), + ) + return "" + } +} + +// IdentityTfunc returns a translation function that don't translate, only +// returns the same id +func IdentityTfunc() TranslateFunc { + return func(translationID string, args ...interface{}) string { + return translationID + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/autolink.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/autolink.go index 14180836..2eb05d90 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/autolink.go @@ -13,7 +13,8 @@ import ( // Based off of extensions/autolink.c from https://github.com/github/cmark var ( - DefaultUrlSchemes = []string{"http", "https", "ftp", "mailto", "tel"} + DefaultURLSchemes = []string{"http", "https", "ftp", "mailto", "tel"} + wwwAutoLinkRegex = regexp.MustCompile(`^www\d{0,3}\.`) ) // Given a string with a w at the given position, tries to parse and return a range containing a www link. 
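The next hunk applies a standard Go optimization: hoist regexp.MustCompile out of parseWWWAutolink into the package-level wwwAutoLinkRegex declared above, so the pattern is compiled once at package init instead of on every call, with no behavior change. A self-contained demonstration of the pattern itself:

package main

import (
	"fmt"
	"regexp"
)

// Compiled once at package init; compiling inside the hot parsing path
// would rebuild the automaton for every candidate link.
var wwwAutoLinkRegex = regexp.MustCompile(`^www\d{0,3}\.`)

func main() {
	for _, s := range []string{"www.example.com", "www2.example.com", "wwww.example.com"} {
		fmt.Println(s, wwwAutoLinkRegex.MatchString(s)) // true, true, false
	}
}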
@@ -30,7 +31,7 @@ func parseWWWAutolink(data string, position int) (Range, bool) { } // Check that this starts with www - if len(data)-position < 4 || !regexp.MustCompile(`^www\d{0,3}\.`).MatchString(data[position:]) { + if len(data)-position < 4 || !wwwAutoLinkRegex.MatchString(data[position:]) { return Range{}, false } @@ -59,9 +60,8 @@ func isAllowedBeforeWWWLink(c byte) bool { switch c { case '*', '_', '~', ')': return true - default: - return false } + return false } // Given a string with a : at the given position, tried to parse and return a range containing a URL scheme @@ -111,7 +111,7 @@ func parseURLAutolink(data string, position int) (Range, bool) { func isSchemeAllowed(scheme string) bool { // Note that this doesn't support the custom URL schemes implemented by the client - for _, allowed := range DefaultUrlSchemes { + for _, allowed := range DefaultURLSchemes { if strings.EqualFold(allowed, scheme) { return true } @@ -153,9 +153,8 @@ func checkDomain(data string, allowShort bool) int { // this is called from parseWWWAutolink if foundPeriod { return i - } else { - return 0 } + return 0 } // Returns true if the provided link starts with a valid character for a domain name. Equivalent to @@ -251,7 +250,6 @@ func canEndAutolink(c rune) bool { switch c { case '?', '!', '.', ',', ':', '*', '_', '~', '\'', '"': return false - default: - return true } + return true } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/block_quote.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/block_quote.go index 6ae2ff44..5cf66d10 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/block_quote.go @@ -34,7 +34,7 @@ func (b *BlockQuote) AddChild(openBlocks []Block) []Block { return openBlocks } -func blockQuoteStart(markdown string, indent int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block { +func blockQuoteStart(markdown string, indent int, r Range) []Block { if indent > 3 { return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/blocks.go similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/blocks.go index 44ee178d..fe9e272f 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/blocks.go @@ -37,13 +37,14 @@ type Range struct { End int } -func closeBlocks(blocks []Block, referenceDefinitions *[]*ReferenceDefinition) { +func closeBlocks(blocks []Block, referenceDefinitions []*ReferenceDefinition) []*ReferenceDefinition { for _, block := range blocks { block.Close() if p, ok := block.(*Paragraph); ok && len(p.ReferenceDefinitions) > 0 { - *referenceDefinitions = append(*referenceDefinitions, p.ReferenceDefinitions...) + referenceDefinitions = append(referenceDefinitions, p.ReferenceDefinitions...) 
} } + return referenceDefinitions } func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefinition) { @@ -78,7 +79,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti for i := lastMatchIndex; i >= 0; i-- { if container, ok := openBlocks[i].(ContainerBlock); ok { if addedBlocks := container.AddChild(newBlocks); addedBlocks != nil { - closeBlocks(openBlocks[i+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions) openBlocks = openBlocks[:i+1] openBlocks = append(openBlocks, addedBlocks...) didAdd = true @@ -98,7 +99,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti continue } - closeBlocks(openBlocks[lastMatchIndex+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[lastMatchIndex+1:], referenceDefinitions) openBlocks = openBlocks[:lastMatchIndex+1] if openBlocks[lastMatchIndex].AddLine(indentation, r) { @@ -109,7 +110,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti for i := lastMatchIndex; i >= 0; i-- { if container, ok := openBlocks[i].(ContainerBlock); ok { if newBlocks := container.AddChild([]Block{paragraph}); newBlocks != nil { - closeBlocks(openBlocks[i+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions) openBlocks = openBlocks[:i+1] openBlocks = append(openBlocks, newBlocks...) break @@ -119,7 +120,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti } } - closeBlocks(openBlocks, &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks, referenceDefinitions) return document, referenceDefinitions } @@ -129,13 +130,13 @@ func blockStart(markdown string, indentation int, r Range, matchedBlocks, unmatc return nil } - if start := blockQuoteStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil { + if start := blockQuoteStart(markdown, indentation, r); start != nil { return start } else if start := listStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil { return start } else if start := indentedCodeStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil { return start - } else if start := fencedCodeStart(markdown, indentation, r, matchedBlocks, unmatchedBlocks); start != nil { + } else if start := fencedCodeStart(markdown, indentation, r); start != nil { return start } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/document.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/document.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/document.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/document.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/fenced_code.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/fenced_code.go index 4fd97fd0..c8caad55 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/fenced_code.go @@ -78,7 +78,7 @@ func (b *FencedCode) AllowsBlockStarts() bool { return false } -func fencedCodeStart(markdown string, 
indentation int, r Range, matchedBlocks, unmatchedBlocks []Block) []Block { +func fencedCodeStart(markdown string, indentation int, r Range) []Block { s := markdown[r.Position:r.End] if !strings.HasPrefix(s, "```") && !strings.HasPrefix(s, "~~~") { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html_entities.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html_entities.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html_entities.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/html_entities.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/indented_code.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/indented_code.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/indented_code.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/indented_code.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inlines.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inlines.go index 4303607f..43dee3bd 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inlines.go @@ -406,10 +406,9 @@ func (p *inlineParser) lookForLinkOrImage() { } p.delimiterStack.Remove(element) return - } else { - p.delimiterStack.Remove(element) - break } + p.delimiterStack.Remove(element) + break } absPos := relativeToAbsolutePosition(p.ranges, p.position) p.inlines = append(p.inlines, &Text{ @@ -595,7 +594,7 @@ func ParseInlines(markdown string, ranges []Range, referenceDefinitions []*Refer } func MergeInlineText(inlines []Inline) []Inline { - var ret []Inline + ret := inlines[:0] for i, v := range inlines { // always add first node if i == 0 { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inspect.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inspect.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inspect.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/inspect.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/lines.go similarity index 83% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/lines.go index a0a64491..f59e5afe 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/lines.go @@ -3,13 +3,18 @@ package markdown +import ( + "strings" +) + type Line struct { Range } -func ParseLines(markdown string) (lines 
[]Line) { +func ParseLines(markdown string) []Line { lineStartPosition := 0 isAfterCarriageReturn := false + lines := make([]Line, 0, strings.Count(markdown, "\n")) for position, r := range markdown { if r == '\n' { lines = append(lines, Line{Range{lineStartPosition, position + 1}}) @@ -23,5 +28,5 @@ func ParseLines(markdown string) (lines []Line) { if lineStartPosition < len(markdown) { lines = append(lines, Line{Range{lineStartPosition, len(markdown)}}) } - return + return lines } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/links.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/links.go index df4aa748..6aa56f25 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/links.go @@ -147,7 +147,7 @@ func parseImageDimensions(markdown string, position int) (raw Range, next int, o // Read width hasWidth := false - for isNumericByte(markdown[position]) { + for position < len(markdown)-1 && isNumericByte(markdown[position]) { hasWidth = true position += 1 } @@ -158,14 +158,14 @@ func parseImageDimensions(markdown string, position int) (raw Range, next int, o } // Read the x - if markdown[position] != 'x' && markdown[position] != 'X' { + if (markdown[position] != 'x' && markdown[position] != 'X') || position == len(markdown)-1 { return } position += 1 // Read height hasHeight := false - for isNumericByte(markdown[position]) { + for position < len(markdown)-1 && isNumericByte(markdown[position]) { hasHeight = true position += 1 } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/list.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/list.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/list.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/list.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/markdown.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/markdown.go index a9879cee..5ccdad8c 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/markdown.go @@ -23,9 +23,8 @@ func isWhitespace(c rune) bool { switch c { case ' ', '\t', '\n', '\u000b', '\u000c', '\r': return true - default: - return false } + return false } func isWhitespaceByte(c byte) bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/paragraph.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/paragraph.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/paragraph.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/paragraph.go diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/reference_definition.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/reference_definition.go similarity index 100% rename from 
vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/reference_definition.go rename to vendor/github.com/mattermost/mattermost-server/v6/shared/markdown/reference_definition.go diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/default.go new file mode 100644 index 00000000..0567c016 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/default.go @@ -0,0 +1,63 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "bytes" + "encoding/json" + "fmt" + "os" +) + +// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation +// before mlog is fully configured. +func defaultLog(level Level, msg string, fields ...Field) { + mFields := make(map[string]string) + buf := &bytes.Buffer{} + + for _, fld := range fields { + buf.Reset() + fld.ValueString(buf, shouldQuote) + mFields[fld.Key] = buf.String() + } + + log := struct { + Level string `json:"level"` + Message string `json:"msg"` + Fields map[string]string `json:"fields,omitempty"` + }{ + level.Name, + msg, + mFields, + } + + if b, err := json.Marshal(log); err != nil { + fmt.Fprintf(os.Stderr, `{"level":"error","msg":"failed to encode log message"}%s`, "\n") + } else { + fmt.Fprintf(os.Stderr, "%s\n", b) + } +} + +func defaultIsLevelEnabled(level Level) bool { + return true +} + +func defaultCustomMultiLog(lvl []Level, msg string, fields ...Field) { + for _, level := range lvl { + defaultLog(level, msg, fields...) + } +} + +// shouldQuote returns true if val contains any characters that require quotations. +func shouldQuote(val string) bool { + for _, c := range val { + if !((c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') { + return true + } + } + return false +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/global.go new file mode 100644 index 00000000..de346123 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/global.go @@ -0,0 +1,132 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "sync" +) + +var ( + globalLogger *Logger + muxGlobalLogger sync.RWMutex +) + +func InitGlobalLogger(logger *Logger) { + muxGlobalLogger.Lock() + defer muxGlobalLogger.Unlock() + + globalLogger = logger +} + +func getGlobalLogger() *Logger { + muxGlobalLogger.RLock() + defer muxGlobalLogger.RUnlock() + + return globalLogger +} + +// IsLevelEnabled returns true only if at least one log target is +// configured to emit the specified log level. Use this check when +// gathering the log info may be expensive. +// +// Note, transformations and serializations done via fields are already +// lazily evaluated and don't require this check beforehand. +func IsLevelEnabled(level Level) bool { + logger := getGlobalLogger() + if logger == nil { + return defaultIsLevelEnabled(level) + } + return logger.IsLevelEnabled(level) +} + +// Log emits the log record for any targets configured for the specified level. +func Log(level Level, msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(level, msg, fields...) 
+ return + } + logger.Log(level, msg, fields...) +} + +// LogM emits the log record for any targets configured for the specified levels. +// Equivalent to calling `Log` once for each level. +func LogM(levels []Level, msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultCustomMultiLog(levels, msg, fields...) + return + } + logger.LogM(levels, msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Trace` level. +func Trace(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlTrace, msg, fields...) + return + } + logger.Trace(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Debug` level. +func Debug(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlDebug, msg, fields...) + return + } + logger.Debug(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Info` level. +func Info(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlInfo, msg, fields...) + return + } + logger.Info(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Warn` level. +func Warn(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlWarn, msg, fields...) + return + } + logger.Warn(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Error` level. +func Error(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlError, msg, fields...) + return + } + logger.Error(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Critical` level. +// DEPRECATED: Either use Error or Fatal. +func Critical(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlCritical, msg, fields...) + return + } + logger.Critical(msg, fields...) +} + +func Fatal(msg string, fields ...Field) { + logger := getGlobalLogger() + if logger == nil { + defaultLog(LvlFatal, msg, fields...) + return + } + logger.Fatal(msg, fields...) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go new file mode 100644 index 00000000..a09e0699 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" +) + +// GraphQLLogger is used to log panics that occur during query execution. +type GraphQLLogger struct { + logger *Logger +} + +func NewGraphQLLogger(logger *Logger) *GraphQLLogger { + return &GraphQLLogger{logger: logger} +} + +// LogPanic satisfies the graphql/log.Logger interface. +// It converts the panic into an error. +func (l *GraphQLLogger) LogPanic(_ context.Context, value interface{}) { + l.logger.Error("Error while executing GraphQL query", Any("error", value)) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/levels.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/levels.go new file mode 100644 index 00000000..1c88e816 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/levels.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package mlog + +import "github.com/mattermost/logr/v2" + +// Standard levels. +var ( + LvlPanic = logr.Panic // ID = 0 + LvlFatal = logr.Fatal // ID = 1 + LvlError = logr.Error // ID = 2 + LvlWarn = logr.Warn // ID = 3 + LvlInfo = logr.Info // ID = 4 + LvlDebug = logr.Debug // ID = 5 + LvlTrace = logr.Trace // ID = 6 + StdAll = []Level{LvlPanic, LvlFatal, LvlError, LvlWarn, LvlInfo, LvlDebug, LvlTrace, LvlStdLog} + // non-standard "critical" level + LvlCritical = Level{ID: 7, Name: "critical"} + // used by redirected standard logger + LvlStdLog = Level{ID: 10, Name: "stdlog"} + // used only by the logger + LvlLogError = Level{ID: 11, Name: "logerror", Stacktrace: true} +) + +// Register custom (discrete) levels here. +// !!!!! Custom ID's must be between 20 and 32,768 !!!!!! +var ( + // used by the audit system + LvlAuditAPI = Level{ID: 100, Name: "audit-api"} + LvlAuditContent = Level{ID: 101, Name: "audit-content"} + LvlAuditPerms = Level{ID: 102, Name: "audit-permissions"} + LvlAuditCLI = Level{ID: 103, Name: "audit-cli"} + + // used by the TCP log target + LvlTCPLogTarget = Level{ID: 120, Name: "TcpLogTarget"} + + // used by Remote Cluster Service + LvlRemoteClusterServiceDebug = Level{ID: 130, Name: "RemoteClusterServiceDebug"} + LvlRemoteClusterServiceError = Level{ID: 131, Name: "RemoteClusterServiceError"} + LvlRemoteClusterServiceWarn = Level{ID: 132, Name: "RemoteClusterServiceWarn"} + + // used by Shared Channel Sync Service + LvlSharedChannelServiceDebug = Level{ID: 200, Name: "SharedChannelServiceDebug"} + LvlSharedChannelServiceError = Level{ID: 201, Name: "SharedChannelServiceError"} + LvlSharedChannelServiceWarn = Level{ID: 202, Name: "SharedChannelServiceWarn"} + LvlSharedChannelServiceMessagesInbound = Level{ID: 203, Name: "SharedChannelServiceMsgInbound"} + LvlSharedChannelServiceMessagesOutbound = Level{ID: 204, Name: "SharedChannelServiceMsgOutbound"} + + // Focalboard + LvlFBTelemetry = Level{ID: 9000, Name: "telemetry"} + LvlFBMetrics = Level{ID: 9001, Name: "metrics"} +) + +// Combinations for LogM (log multi). +var ( + MLvlAuditAll = []Level{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI} +) diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go new file mode 100644 index 00000000..9b4bb820 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go @@ -0,0 +1,443 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +// Package mlog provides a simple wrapper around Logr. +package mlog + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "sync/atomic" + "time" + + "github.com/mattermost/logr/v2" + logrcfg "github.com/mattermost/logr/v2/config" +) + +const ( + ShutdownTimeout = time.Second * 15 + FlushTimeout = time.Second * 15 + DefaultMaxQueueSize = 1000 + DefaultMetricsUpdateFreqMillis = 15000 +) + +type LoggerIFace interface { + IsLevelEnabled(Level) bool + Debug(string, ...Field) + Info(string, ...Field) + Warn(string, ...Field) + Error(string, ...Field) + Critical(string, ...Field) + Log(Level, string, ...Field) + LogM([]Level, string, ...Field) +} + +// Type and function aliases from Logr to limit the spread of dependencies. 
+type Field = logr.Field +type Level = logr.Level +type Option = logr.Option +type Target = logr.Target +type TargetInfo = logr.TargetInfo +type LogRec = logr.LogRec +type LogCloner = logr.LogCloner +type MetricsCollector = logr.MetricsCollector +type TargetCfg = logrcfg.TargetCfg +type TargetFactory = logrcfg.TargetFactory +type FormatterFactory = logrcfg.FormatterFactory +type Factories = logrcfg.Factories +type Sugar = logr.Sugar + +// LoggerConfiguration is a map of LogTarget configurations. +type LoggerConfiguration map[string]TargetCfg + +func (lc LoggerConfiguration) Append(cfg LoggerConfiguration) { + for k, v := range cfg { + lc[k] = v + } +} + +func (lc LoggerConfiguration) toTargetCfg() map[string]logrcfg.TargetCfg { + tcfg := make(map[string]logrcfg.TargetCfg) + for k, v := range lc { + tcfg[k] = v + } + return tcfg +} + +// Any picks the best supported field type based on type of val. +// For best performance when passing a struct (or struct pointer), +// implement `logr.LogWriter` on the struct, otherwise reflection +// will be used to generate a string representation. +var Any = logr.Any + +// Int64 constructs a field containing a key and Int64 value. +var Int64 = logr.Int64 + +// Int32 constructs a field containing a key and Int32 value. +var Int32 = logr.Int32 + +// Int constructs a field containing a key and Int value. +var Int = logr.Int + +// Uint64 constructs a field containing a key and Uint64 value. +var Uint64 = logr.Uint64 + +// Uint32 constructs a field containing a key and Uint32 value. +var Uint32 = logr.Uint32 + +// Uint constructs a field containing a key and Uint value. +var Uint = logr.Uint + +// Float64 constructs a field containing a key and Float64 value. +var Float64 = logr.Float64 + +// Float32 constructs a field containing a key and Float32 value. +var Float32 = logr.Float32 + +// String constructs a field containing a key and String value. +var String = logr.String + +// Stringer constructs a field containing a key and a fmt.Stringer value. +// The fmt.Stringer's `String` method is called lazily. +var Stringer = func(key string, s fmt.Stringer) logr.Field { + if s == nil { + return Field{Key: key, Type: logr.StringType, String: ""} + } + return Field{Key: key, Type: logr.StringType, String: s.String()} +} + +// Err constructs a field containing a default key ("error") and error value. +var Err = func(err error) logr.Field { + return NamedErr("error", err) +} + +// NamedErr constructs a field containing a key and error value. +var NamedErr = func(key string, err error) logr.Field { + if err == nil { + return Field{Key: key, Type: logr.StringType, String: ""} + } + return Field{Key: key, Type: logr.StringType, String: err.Error()} +} + +// Bool constructs a field containing a key and bool value. +var Bool = logr.Bool + +// Time constructs a field containing a key and time.Time value. +var Time = logr.Time + +// Duration constructs a field containing a key and time.Duration value. +var Duration = logr.Duration + +// Millis constructs a field containing a key and timestamp value. +// The timestamp is expected to be milliseconds since Jan 1, 1970 UTC. +var Millis = logr.Millis + +// Array constructs a field containing a key and array value. +var Array = logr.Array + +// Map constructs a field containing a key and map value. +var Map = logr.Map + +// Logger provides a thin wrapper around a Logr instance. This is a struct instead of an interface +// so that there are no allocations on the heap each interface method invocation. 
Normally not
+// something to be concerned about, but logging calls for disabled levels should have as little CPU
+// and memory impact as possible. Most of these wrapper calls will be inlined as well.
+type Logger struct {
+	log        *logr.Logger
+	lockConfig *int32
+}
+
+// NewLogger creates a new Logger instance which can be configured via `(*Logger).Configure`.
+// Some options with invalid values can cause an error to be returned, however `NewLogger()`
+// using just defaults never errors.
+func NewLogger(options ...Option) (*Logger, error) {
+	options = append(options, logr.StackFilter(logr.GetPackageName("NewLogger")))
+
+	lgr, err := logr.New(options...)
+	if err != nil {
+		return nil, err
+	}
+
+	log := lgr.NewLogger()
+	var lockConfig int32
+
+	return &Logger{
+		log:        &log,
+		lockConfig: &lockConfig,
+	}, nil
+}
+
+// Configure provides a new configuration for this logger.
+// Zero or more sources of config can be provided:
+//   cfgFile    - path to file containing JSON
+//   cfgEscaped - JSON string probably from ENV var
+//
+// For each case JSON containing log targets is provided. Target name collisions are resolved
+// using the following precedence:
+//     cfgFile > cfgEscaped
+//
+// An optional set of factories can be provided which will be called to create any target
+// types or formatters not built-in.
+func (l *Logger) Configure(cfgFile string, cfgEscaped string, factories *Factories) error {
+	if atomic.LoadInt32(l.lockConfig) != 0 {
+		return ErrConfigurationLock
+	}
+
+	cfgMap := make(LoggerConfiguration)
+
+	// Add config from file
+	if cfgFile != "" {
+		b, err := ioutil.ReadFile(cfgFile)
+		if err != nil {
+			return fmt.Errorf("error reading logger config file %s: %w", cfgFile, err)
+		}
+
+		var mapCfgFile LoggerConfiguration
+		if err := json.Unmarshal(b, &mapCfgFile); err != nil {
+			return fmt.Errorf("error decoding logger config file %s: %w", cfgFile, err)
+		}
+		cfgMap.Append(mapCfgFile)
+	}
+
+	// Add config from escaped json string
+	if cfgEscaped != "" {
+		var mapCfgEscaped LoggerConfiguration
+		if err := json.Unmarshal([]byte(cfgEscaped), &mapCfgEscaped); err != nil {
+			return fmt.Errorf("error decoding logger config as escaped json: %w", err)
+		}
+		cfgMap.Append(mapCfgEscaped)
+	}
+
+	if len(cfgMap) == 0 {
+		return nil
+	}
+
+	return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), factories)
+}
+
+// ConfigureTargets provides a new configuration for this logger via a `LoggerConfig` map.
+// Typically `mlog.Configure` is used instead which accepts JSON formatted configuration.
+// An optional set of factories can be provided which will be called to create any target
+// types or formatters not built-in.
+func (l *Logger) ConfigureTargets(cfg LoggerConfiguration, factories *Factories) error {
+	if atomic.LoadInt32(l.lockConfig) != 0 {
+		return ErrConfigurationLock
+	}
+	return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), factories)
+}
+
+// LockConfiguration disallows further configuration changes until `UnlockConfiguration`
+// is called. The previous locked state is returned.
+func (l *Logger) LockConfiguration() bool {
+	old := atomic.SwapInt32(l.lockConfig, 1)
+	return old != 0
+}
+
+// UnlockConfiguration allows configuration changes. The previous locked state is returned.
+func (l *Logger) UnlockConfiguration() bool {
+	old := atomic.SwapInt32(l.lockConfig, 0)
+	return old != 0
+}
+
+// IsConfigurationLocked returns the current state of the configuration lock.
+func (l *Logger) IsConfigurationLocked() bool { + return atomic.LoadInt32(l.lockConfig) != 0 +} + +// With creates a new Logger with the specified fields. This is a light-weight +// operation and can be called on demand. +func (l *Logger) With(fields ...Field) *Logger { + logWith := l.log.With(fields...) + return &Logger{ + log: &logWith, + lockConfig: l.lockConfig, + } +} + +// IsLevelEnabled returns true only if at least one log target is +// configured to emit the specified log level. Use this check when +// gathering the log info may be expensive. +// +// Note, transformations and serializations done via fields are already +// lazily evaluated and don't require this check beforehand. +func (l *Logger) IsLevelEnabled(level Level) bool { + return l.log.IsLevelEnabled(level) +} + +// Log emits the log record for any targets configured for the specified level. +func (l *Logger) Log(level Level, msg string, fields ...Field) { + l.log.Log(level, msg, fields...) +} + +// LogM emits the log record for any targets configured for the specified levels. +// Equivalent to calling `Log` once for each level. +func (l *Logger) LogM(levels []Level, msg string, fields ...Field) { + l.log.LogM(levels, msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Trace` level. +func (l *Logger) Trace(msg string, fields ...Field) { + l.log.Trace(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Debug` level. +func (l *Logger) Debug(msg string, fields ...Field) { + l.log.Debug(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Info` level. +func (l *Logger) Info(msg string, fields ...Field) { + l.log.Info(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Warn` level. +func (l *Logger) Warn(msg string, fields ...Field) { + l.log.Warn(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Error` level. +func (l *Logger) Error(msg string, fields ...Field) { + l.log.Error(msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Critical` level. +func (l *Logger) Critical(msg string, fields ...Field) { + l.log.Log(LvlCritical, msg, fields...) +} + +// Convenience method equivalent to calling `Log` with the `Fatal` level, +// followed by `os.Exit(1)`. +func (l *Logger) Fatal(msg string, fields ...Field) { + l.log.Log(logr.Fatal, msg, fields...) + _ = l.Shutdown() + os.Exit(1) +} + +// HasTargets returns true if at least one log target has been added. +func (l *Logger) HasTargets() bool { + return l.log.Logr().HasTargets() +} + +// StdLogger creates a standard logger backed by this logger. +// All log records are output with the specified level. +func (l *Logger) StdLogger(level Level) *log.Logger { + return l.log.StdLogger(level) +} + +// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger +// anything written will be interpreted as log entries and passed to this logger. +func (l *Logger) StdLogWriter() io.Writer { + return &logWriter{ + logger: l, + } +} + +// RedirectStdLog redirects output from the standard library's package-global logger +// to this logger at the specified level and with zero or more Field's. Since this logger already +// handles caller annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// A function is returned that restores the original prefix and flags and resets the standard +// library's output to os.Stdout. 
+func (l *Logger) RedirectStdLog(level Level, fields ...Field) func() {
+	return l.log.Logr().RedirectStdLog(level, fields...)
+}
+
+// RemoveTargets safely removes one or more targets based on the filtering method.
+// `f` should return true to delete the target, false to keep it.
+// When removing a target, best effort is made to write any queued log records before
+// closing, with ctx determining how much time can be spent in total.
+// Note, keep the timeout short since this method blocks certain logging operations.
+func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) error {
+	return l.log.Logr().RemoveTargets(ctx, f)
+}
+
+// SetMetricsCollector sets (or resets) the metrics collector to be used for gathering
+// metrics for all targets. Only targets added after this call will use the collector.
+//
+// To ensure all targets use a collector, use the `SetMetricsCollector` option when
+// creating the Logger instead, or configure/reconfigure the Logger after calling this method.
+func (l *Logger) SetMetricsCollector(collector MetricsCollector, updateFrequencyMillis int64) {
+	l.log.Logr().SetMetricsCollector(collector, updateFrequencyMillis)
+}
+
+// Sugar creates a new `Logger` with a less structured API. Any fields are preserved.
+func (l *Logger) Sugar(fields ...Field) Sugar {
+	return l.log.Sugar(fields...)
+}
+
+// Flush forces all targets to write out any queued log records with a default timeout.
+func (l *Logger) Flush() error {
+	ctx, cancel := context.WithTimeout(context.Background(), FlushTimeout)
+	defer cancel()
+	return l.log.Logr().FlushWithTimeout(ctx)
+}
+
+// FlushWithTimeout forces all targets to write out any queued log records with the specified timeout.
+func (l *Logger) FlushWithTimeout(ctx context.Context) error {
+	return l.log.Logr().FlushWithTimeout(ctx)
+}
+
+// Shutdown shuts down the logger after making best efforts to flush any
+// remaining records.
+func (l *Logger) Shutdown() error {
+	ctx, cancel := context.WithTimeout(context.Background(), ShutdownTimeout)
+	defer cancel()
+	return l.log.Logr().ShutdownWithTimeout(ctx)
+}
+
+// ShutdownWithTimeout shuts down the logger after making best efforts to flush any
+// remaining records.
+func (l *Logger) ShutdownWithTimeout(ctx context.Context) error {
+	return l.log.Logr().ShutdownWithTimeout(ctx)
+}
+
+// GetPackageName reduces a fully qualified function name to the package name.
+// By sirupsen: https://github.com/sirupsen/logrus/blob/master/entry.go
+func GetPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+	return f
+}
+
+// ShouldQuote returns true if val contains any characters that might be unsafe
+// when injecting log output into an aggregator, viewer or report.
+// Returning true means that val should be surrounded by quotation marks before being
+// output into logs.
+func ShouldQuote(val string) bool {
+	for _, c := range val {
+		if !((c >= '0' && c <= '9') ||
+			(c >= 'a' && c <= 'z') ||
+			(c >= 'A' && c <= 'Z') ||
+			c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+type logWriter struct {
+	logger *Logger
+}
+
+func (lw *logWriter) Write(p []byte) (int, error) {
+	lw.logger.Info(string(p))
+	return len(p), nil
+}
+
+// ErrConfigurationLock is returned when one of a logger's configuration APIs is called
+// while the configuration is locked.
+var ErrConfigurationLock = errors.New("configuration is locked") diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/options.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/options.go new file mode 100644 index 00000000..3a98b480 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/options.go @@ -0,0 +1,55 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import "github.com/mattermost/logr/v2" + +// MaxQueueSize is the maximum number of log records that can be queued. +// If exceeded, `OnQueueFull` is called which determines if the log +// record will be dropped or block until add is successful. +// Defaults to DefaultMaxQueueSize. +func MaxQueueSize(size int) Option { + return logr.MaxQueueSize(size) +} + +// OnLoggerError, when not nil, is called any time an internal +// logging error occurs. For example, this can happen when a +// target cannot connect to its data sink. +func OnLoggerError(f func(error)) Option { + return logr.OnLoggerError(f) +} + +// OnQueueFull, when not nil, is called on an attempt to add +// a log record to a full Logr queue. +// `MaxQueueSize` can be used to modify the maximum queue size. +// This function should return quickly, with a bool indicating whether +// the log record should be dropped (true) or block until the log record +// is successfully added (false). If nil then blocking (false) is assumed. +func OnQueueFull(f func(rec *LogRec, maxQueueSize int) bool) Option { + return logr.OnQueueFull(f) +} + +// OnTargetQueueFull, when not nil, is called on an attempt to add +// a log record to a full target queue provided the target supports reporting +// this condition. +// This function should return quickly, with a bool indicating whether +// the log record should be dropped (true) or block until the log record +// is successfully added (false). If nil then blocking (false) is assumed. +func OnTargetQueueFull(f func(target Target, rec *LogRec, maxQueueSize int) bool) Option { + return logr.OnTargetQueueFull(f) +} + +// SetMetricsCollector enables metrics collection by supplying a MetricsCollector. +// The MetricsCollector provides counters and gauges that are updated by log targets. +// `updateFreqMillis` determines how often polled metrics are updated. Defaults to 15000 (15 seconds) +// and must be at least 250 so we don't peg the CPU. +func SetMetricsCollector(collector MetricsCollector, updateFreqMillis int64) Option { + return logr.SetMetricsCollector(collector, updateFreqMillis) +} + +// StackFilter provides a list of package names to exclude from the top of +// stack traces. The Logr packages are automatically filtered. +func StackFilter(pkg ...string) Option { + return logr.StackFilter(pkg...) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/tlog.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/tlog.go new file mode 100644 index 00000000..ef8f6016 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/tlog.go @@ -0,0 +1,79 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "bytes" + "io" + "os" + "sync" + + "github.com/mattermost/logr/v2" + "github.com/mattermost/logr/v2/formatters" + "github.com/mattermost/logr/v2/targets" +) + +// AddWriterTarget adds a simple io.Writer target to an existing Logger. 
+// The `io.Writer` can be a buffer which is useful for testing. +// When adding a buffer to collect logs make sure to use `mlog.Buffer` which is +// a thread safe version of `bytes.Buffer`. +func AddWriterTarget(logger *Logger, w io.Writer, useJSON bool, levels ...Level) error { + filter := logr.NewCustomFilter(levels...) + + var formatter logr.Formatter + if useJSON { + formatter = &formatters.JSON{EnableCaller: true} + } else { + formatter = &formatters.Plain{EnableCaller: true} + } + + target := targets.NewWriterTarget(w) + return logger.log.Logr().AddTarget(target, "_testWriter", filter, formatter, 1000) +} + +// CreateConsoleTestLogger creates a logger for unit tests. Log records are output to `os.Stdout`. +// Logs can also be mirrored to the optional `io.Writer`. +func CreateConsoleTestLogger(useJSON bool, level Level) *Logger { + logger, _ := NewLogger() + + filter := logr.StdFilter{ + Lvl: level, + Stacktrace: LvlPanic, + } + + var formatter logr.Formatter + if useJSON { + formatter = &formatters.JSON{EnableCaller: true} + } else { + formatter = &formatters.Plain{EnableCaller: true} + } + + target := targets.NewWriterTarget(os.Stdout) + if err := logger.log.Logr().AddTarget(target, "_testcon", filter, formatter, 1000); err != nil { + panic(err) + } + return logger +} + +// Buffer provides a thread-safe buffer useful for logging to memory in unit tests. +type Buffer struct { + buf bytes.Buffer + mux sync.Mutex +} + +func (b *Buffer) Read(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Read(p) +} +func (b *Buffer) Write(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Write(p) +} +func (b *Buffer) String() string { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.String() +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go b/vendor/github.com/mattermost/mattermost-server/v6/utils/jsonutils/json.go similarity index 66% rename from vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go rename to vendor/github.com/mattermost/mattermost-server/v6/utils/jsonutils/json.go index 4651ec87..9d5e7872 100644 --- a/vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/utils/jsonutils/json.go @@ -10,34 +10,34 @@ import ( "github.com/pkg/errors" ) -type HumanizedJsonError struct { +type HumanizedJSONError struct { Err error Line int Character int } -func (e *HumanizedJsonError) Error() string { +func (e *HumanizedJSONError) Error() string { return e.Err.Error() } -// HumanizeJsonError extracts error offsets and annotates the error with useful context -func HumanizeJsonError(err error, data []byte) error { +// HumanizeJSONError extracts error offsets and annotates the error with useful context +func HumanizeJSONError(err error, data []byte) error { if syntaxError, ok := err.(*json.SyntaxError); ok { - return NewHumanizedJsonError(syntaxError, data, syntaxError.Offset) + return NewHumanizedJSONError(syntaxError, data, syntaxError.Offset) } else if unmarshalError, ok := err.(*json.UnmarshalTypeError); ok { - return NewHumanizedJsonError(unmarshalError, data, unmarshalError.Offset) + return NewHumanizedJSONError(unmarshalError, data, unmarshalError.Offset) } else { return err } } -func NewHumanizedJsonError(err error, data []byte, offset int64) *HumanizedJsonError { +func NewHumanizedJSONError(err error, data []byte, offset int64) *HumanizedJSONError { if err == nil { return nil } if offset < 0 
|| offset > int64(len(data)) { - return &HumanizedJsonError{ + return &HumanizedJSONError{ Err: errors.Wrapf(err, "invalid offset %d", offset), } } @@ -48,7 +48,7 @@ func NewHumanizedJsonError(err error, data []byte, offset int64) *HumanizedJsonE lastLineOffset := bytes.LastIndex(data[:offset], lineSep) character := int(offset) - (lastLineOffset + 1) + 1 - return &HumanizedJsonError{ + return &HumanizedJSONError{ Line: line, Character: character, Err: errors.Wrapf(err, "parsing error at line %d, character %d", line, character), diff --git a/vendor/github.com/minio/md5-simd/LICENSE b/vendor/github.com/minio/md5-simd/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md new file mode 100644 index 00000000..fa6fce1a --- /dev/null +++ b/vendor/github.com/minio/md5-simd/README.md @@ -0,0 +1,198 @@ + +# md5-simd + +This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core. + +It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by amongst others supporting different message sizes per lane and adding AVX512. 
+
+`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantage of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.
+
+It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
+Rather, it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
+thereby making more efficient usage of the computing resources.
+
+## Usage
+
+[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)
+
+
+In order to use `md5-simd`, you must first create a `Server` which can be
+used to instantiate one or more objects for MD5 hashing.
+
+These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
+and as such the normal Write/Reset/Sum functionality works as expected.
+
+As an example:
+```
+	// Create server
+	server := md5simd.NewServer()
+	defer server.Close()
+
+	// Create hashing object (conforming to hash.Hash)
+	md5Hash := server.NewHash()
+	defer md5Hash.Close()
+
+	// Write one (or more) blocks
+	md5Hash.Write(block)
+
+	// Return digest
+	digest := md5Hash.Sum([]byte{})
+```
+
+To keep performance, both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
+and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) should
+be closed using the `Close()` function when no longer needed.
+
+A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality.
+
+In case your system does not support the instructions required, it will fall back to using `crypto/md5` for hashing.
+
+## Limitations
+
+As explained above, `md5-simd` does not speed up an individual MD5 hash sum computation,
+unless some hierarchical tree construct is used, but this will result in different outcomes.
+Running a single hash on a server results in approximately half the throughput.
+
+Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
+This can be beneficial in e.g. multi-threaded server applications where many go-routines
+are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core.
+
+This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
+functionality where each MD5 hash computation will consume a single thread (core).
+
+It is best to test and measure the overall CPU usage in a representative usage scenario in your application
+to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load.
+
+Also note that `md5-simd` is best meant to work with large objects,
+so if your application only hashes small objects of a few kilobytes
+you may be better off using `crypto/md5`.
+
+## Performance
+
+For the best performance writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
+To help with that a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
+can be inserted if you are unsure of the sizes of the writes.
+Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
+
+A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
+In situations where it is likely that more than 16 streams are fully loaded, it may be beneficial
+to use multiple servers.
+
+The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code:
+
+![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)
+
+Compared to `crypto/md5`, the AVX2 version is up to 4x faster:
+
+```
+$ benchcmp crypto-md5.txt avx2.txt
+benchmark                  old MB/s   new MB/s   speedup
+BenchmarkParallel/32KB-4   2229.22    7370.50    3.31x
+BenchmarkParallel/64KB-4   2233.61    8248.46    3.69x
+BenchmarkParallel/128KB-4  2235.43    8660.74    3.87x
+BenchmarkParallel/256KB-4  2236.39    8863.87    3.96x
+BenchmarkParallel/512KB-4  2238.05    8985.39    4.01x
+BenchmarkParallel/1MB-4    2233.56    9042.62    4.05x
+BenchmarkParallel/2MB-4    2224.11    9014.46    4.05x
+BenchmarkParallel/4MB-4    2199.78    8993.61    4.09x
+BenchmarkParallel/8MB-4    2182.48    8748.22    4.01x
+```
+
+Compared to `crypto/md5`, the AVX512 version is up to 8x faster (for larger block sizes):
+
+```
+$ benchcmp crypto-md5.txt avx512.txt
+benchmark                  old MB/s   new MB/s    speedup
+BenchmarkParallel/32KB-4   2229.22    11605.78    5.21x
+BenchmarkParallel/64KB-4   2233.61    14329.65    6.42x
+BenchmarkParallel/128KB-4  2235.43    16166.39    7.23x
+BenchmarkParallel/256KB-4  2236.39    15570.09    6.96x
+BenchmarkParallel/512KB-4  2238.05    16705.83    7.46x
+BenchmarkParallel/1MB-4    2233.56    16941.95    7.59x
+BenchmarkParallel/2MB-4    2224.11    17136.01    7.70x
+BenchmarkParallel/4MB-4    2199.78    17218.61    7.83x
+BenchmarkParallel/8MB-4    2182.48    17252.88    7.91x
+```
+
+These measurements were performed on an AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.
+
+If only one or two inputs are available, the scalar calculation method will be used for
+optimal speed in these cases.
+
+## Operation
+
+To make operation as easy as possible there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:
+
+![server-architecture](chart/server-architecture.png)
+
+The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results.
+
+Whenever there is data available, the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available all the lanes will be filled. However, since that may not be the case, the server will fill fewer lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.
+
+![server-lanes-example](chart/server-lanes-example.png)
+
+In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.
+
+For AVX512 all 16 calculations will be done on a single core; on AVX2, on 2 cores if there is data for more than 8 lanes.
+So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.
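+
+As a rough end-to-end sketch of the scheme described above (assuming only the `NewServer`/`NewHash`/`Close` API shown in the Usage section and the 32KB buffering advice from the Performance section; stream contents here are placeholders), multiple goroutines can feed a single server concurrently:
+
+```
+// Hypothetical sketch: hash several readers in parallel through one
+// md5simd.Server, buffering writes to the recommended 32KB block size.
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+
+	md5simd "github.com/minio/md5-simd"
+)
+
+func main() {
+	server := md5simd.NewServer() // one server schedules all hash lanes
+	defer server.Close()
+
+	inputs := []io.Reader{
+		strings.NewReader(strings.Repeat("a", 1<<20)),
+		strings.NewReader(strings.Repeat("b", 1<<20)),
+		strings.NewReader(strings.Repeat("c", 1<<20)),
+	}
+
+	sums := make([][]byte, len(inputs))
+	var wg sync.WaitGroup
+	for i, r := range inputs {
+		wg.Add(1)
+		go func(i int, r io.Reader) {
+			defer wg.Done()
+			h := server.NewHash() // conforms to hash.Hash
+			defer h.Close()
+
+			// Buffer writes so the server sees full 32KB blocks per round.
+			bw := bufio.NewWriterSize(h, 32<<10)
+			if _, err := io.Copy(bw, r); err != nil {
+				return // error handling elided for brevity
+			}
+			bw.Flush() // flush buffered data before reading the digest
+			sums[i] = h.Sum(nil)
+		}(i, r)
+	}
+	wg.Wait()
+
+	for i, sum := range sums {
+		fmt.Printf("stream %d: %x\n", i, sum)
+	}
+}
+```
+
+Each goroutine owns its own `Hasher`, so no locking is needed around the hash objects themselves; the server internally packs whichever lanes have data into each parallel round.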
+
+
+## Design & Tech
+
+md5-simd has both an AVX2 (8-lane parallel) and an AVX512 (16-lane parallel) algorithm to accelerate the computation, with the following function definitions:
+```
+//go:noescape
+func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
+
+//go:noescape
+func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
+```
+
+The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged apart from minor (cosmetic) changes.
+
+The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.
+
+### Caching in upper ZMM registers
+
+The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1`, which are subsequently used in `ROUND2` through `ROUND4`.
+
+Since AVX512 has double the number of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers to keep the intermediate states on the CPU. As such, there is no need to pass a corresponding `cache16` into the AVX512 block function.
+
+### Direct loading using 64-bit pointers
+
+The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out in memory, it is not possible to derive a "base" address and corresponding offsets (all within 32 bits) for all 8 slices.
+
+As such, the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer, along with (fixed) 32-bit offsets, into the assembly code.
+
+For the AVX512 version this interim buffer is not needed, since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).
+
+Note that two load (gather) instructions are needed because the AVX512 version processes 16 lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).
+
+### Masking support
+
+Because pointers are passed directly from the Golang slices, the code needs to protect against NULL pointers.
+For this, a 16-bit mask is passed into the AVX512 assembly code and used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
+
+### Minor optimizations
+
+The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
+
+Also, several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.
+
+## Low level block function performance
+
+The benchmark below shows the (single-threaded) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). The baseline single-core performance of the standard `crypto/md5` package is shown for comparison.
+ +``` +BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op +BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op +BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op +``` + +## License + +`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. \ No newline at end of file diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s new file mode 100644 index 00000000..be0a43a3 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block16_amd64.s @@ -0,0 +1,228 @@ +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//+build !noasm,!appengine,gc + +// This is the AVX512 implementation of the MD5 block function (16-way parallel) + +#define prep(index) \ + KMOVQ kmask, ktmp \ + VPGATHERDD index*4(base)(ptrs*1), ktmp, mem + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1noload(a, b, c, d, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VANDNPD c, tmp, tmp \ + VPTERNLOGD $0xEC, b, tmp, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + VPROLD $shift, a, a \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x96, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x36, b, c, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VPXORQ c, ones, tmp \ + VPADDD b, a, a + +TEXT ·block16(SB), 4, $0-40 + + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ ptrs+16(FP), AX + KMOVQ mask+24(FP), K1 + MOVQ n+32(FP), DX + MOVQ ·avx512md5consts+0(SB), DI + +#define a Z0 +#define b Z1 +#define c Z2 +#define d Z3 + +#define sa Z4 +#define sb Z5 +#define sc Z6 +#define sd Z7 + +#define tmp Z8 +#define tmp2 Z9 +#define ptrs Z10 +#define ones Z12 +#define mem Z15 + +#define kmask K1 +#define ktmp K3 + +// ---------------------------------------------------------- +// Registers Z16 through to Z31 are used for caching purposes +// ---------------------------------------------------------- + +#define dig BX +#define count DX +#define base SI +#define consts DI + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 0x40(dig), b + VMOVUPD 0x80(dig), c + VMOVUPD 0xc0(dig), d + + // load source pointers + VMOVUPD 0x00(AX), ptrs + + MOVQ $-1, AX + VPBROADCASTQ AX, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + VMOVAPD mem, Z16 + + ROUND1(a,b,c,d, 1,0x00, 7) + VMOVAPD mem, Z17 + ROUND1(d,a,b,c, 2,0x01,12) + VMOVAPD mem, Z18 + ROUND1(c,d,a,b, 3,0x02,17) + VMOVAPD mem, Z19 + ROUND1(b,c,d,a, 4,0x03,22) + VMOVAPD mem, Z20 + ROUND1(a,b,c,d, 5,0x04, 7) + VMOVAPD mem, Z21 + ROUND1(d,a,b,c, 6,0x05,12) + VMOVAPD mem, Z22 + 
ROUND1(c,d,a,b, 7,0x06,17) + VMOVAPD mem, Z23 + ROUND1(b,c,d,a, 8,0x07,22) + VMOVAPD mem, Z24 + ROUND1(a,b,c,d, 9,0x08, 7) + VMOVAPD mem, Z25 + ROUND1(d,a,b,c,10,0x09,12) + VMOVAPD mem, Z26 + ROUND1(c,d,a,b,11,0x0a,17) + VMOVAPD mem, Z27 + ROUND1(b,c,d,a,12,0x0b,22) + VMOVAPD mem, Z28 + ROUND1(a,b,c,d,13,0x0c, 7) + VMOVAPD mem, Z29 + ROUND1(d,a,b,c,14,0x0d,12) + VMOVAPD mem, Z30 + ROUND1(c,d,a,b,15,0x0e,17) + VMOVAPD mem, Z31 + + ROUND1noload(b,c,d,a, 0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, Z17,0x10, 5) + ROUND2(d,a,b,c, Z22,0x11, 9) + ROUND2(c,d,a,b, Z27,0x12,14) + ROUND2(b,c,d,a, Z16,0x13,20) + ROUND2(a,b,c,d, Z21,0x14, 5) + ROUND2(d,a,b,c, Z26,0x15, 9) + ROUND2(c,d,a,b, Z31,0x16,14) + ROUND2(b,c,d,a, Z20,0x17,20) + ROUND2(a,b,c,d, Z25,0x18, 5) + ROUND2(d,a,b,c, Z30,0x19, 9) + ROUND2(c,d,a,b, Z19,0x1a,14) + ROUND2(b,c,d,a, Z24,0x1b,20) + ROUND2(a,b,c,d, Z29,0x1c, 5) + ROUND2(d,a,b,c, Z18,0x1d, 9) + ROUND2(c,d,a,b, Z23,0x1e,14) + ROUND2(b,c,d,a, Z28,0x1f,20) + + VMOVAPD c, tmp + + ROUND3(a,b,c,d, Z21,0x20, 4) + ROUND3(d,a,b,c, Z24,0x21,11) + ROUND3(c,d,a,b, Z27,0x22,16) + ROUND3(b,c,d,a, Z30,0x23,23) + ROUND3(a,b,c,d, Z17,0x24, 4) + ROUND3(d,a,b,c, Z20,0x25,11) + ROUND3(c,d,a,b, Z23,0x26,16) + ROUND3(b,c,d,a, Z26,0x27,23) + ROUND3(a,b,c,d, Z29,0x28, 4) + ROUND3(d,a,b,c, Z16,0x29,11) + ROUND3(c,d,a,b, Z19,0x2a,16) + ROUND3(b,c,d,a, Z22,0x2b,23) + ROUND3(a,b,c,d, Z25,0x2c, 4) + ROUND3(d,a,b,c, Z28,0x2d,11) + ROUND3(c,d,a,b, Z31,0x2e,16) + ROUND3(b,c,d,a, Z18,0x2f,23) + + VPXORQ d, ones, tmp + + ROUND4(a,b,c,d, Z16,0x30, 6) + ROUND4(d,a,b,c, Z23,0x31,10) + ROUND4(c,d,a,b, Z30,0x32,15) + ROUND4(b,c,d,a, Z21,0x33,21) + ROUND4(a,b,c,d, Z28,0x34, 6) + ROUND4(d,a,b,c, Z19,0x35,10) + ROUND4(c,d,a,b, Z26,0x36,15) + ROUND4(b,c,d,a, Z17,0x37,21) + ROUND4(a,b,c,d, Z24,0x38, 6) + ROUND4(d,a,b,c, Z31,0x39,10) + ROUND4(c,d,a,b, Z22,0x3a,15) + ROUND4(b,c,d,a, Z29,0x3b,21) + ROUND4(a,b,c,d, Z20,0x3c, 6) + ROUND4(d,a,b,c, Z27,0x3d,10) + ROUND4(c,d,a,b, Z18,0x3e,15) + ROUND4(b,c,d,a, Z25,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 0x40(dig) + VMOVUPD c, 0x80(dig) + VMOVUPD d, 0xc0(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s new file mode 100644 index 00000000..f57db17a --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block8_amd64.s @@ -0,0 +1,281 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2018 Igneous Systems +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +// This is the AVX2 implementation of the MD5 block function (8-way parallel) + +// block8(state *uint64, base uintptr, bufs *int32, cache *byte, n int) +TEXT ·block8(SB), 4, $0-40 + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ bufs+16(FP), AX + MOVQ cache+24(FP), CX + MOVQ n+32(FP), DX + MOVQ ·avx256md5consts+0(SB), DI + + // Align cache (which is stack allocated by the compiler) + // to a 256 bit boundary (ymm register alignment) + // The cache8 type is deliberately oversized to permit this. + ADDQ $31, CX + ANDB $-32, CL + +#define a Y0 +#define b Y1 +#define c Y2 +#define d Y3 + +#define sa Y4 +#define sb Y5 +#define sc Y6 +#define sd Y7 + +#define tmp Y8 +#define tmp2 Y9 + +#define mask Y10 +#define off Y11 + +#define ones Y12 + +#define rtmp1 Y13 +#define rtmp2 Y14 + +#define mem Y15 + +#define dig BX +#define cache CX +#define count DX +#define base SI +#define consts DI + +#define prepmask \ + VPXOR mask, mask, mask \ + VPCMPGTD mask, off, mask + +#define prep(index) \ + VMOVAPD mask, rtmp2 \ + VPGATHERDD rtmp2, index*4(base)(off*1), mem + +#define load(index) \ + VMOVAPD index*32(cache), mem + +#define store(index) \ + VMOVAPD mem, index*32(cache) + +#define roll(shift, a) \ + VPSLLD $shift, a, rtmp1 \ + VPSRLD $32-shift, a, a \ + VPOR rtmp1, a, a + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXOR c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1load(a, b, c, d, index, const, shift) \ + VXORPD c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + load(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp2, tmp2 \ + VANDNPD c, tmp, tmp \ + load(index) \ + VPOR tmp, tmp2, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + roll(shift,a) \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + load(index) \ + VPXOR d, tmp, tmp \ + VPXOR b, tmp, tmp \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPOR b, tmp, tmp \ + VPXOR c, tmp, tmp \ + VPADDD tmp, a, a \ + load(index) \ + roll(shift,a) \ + VPXOR c, ones, tmp \ + VPADDD b, a, a + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 32(dig), b + VMOVUPD 64(dig), c + VMOVUPD 96(dig), d + + // load source buffer offsets + VMOVUPD (AX), off + + prepmask + VPCMPEQD ones, ones, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + store(0) + + ROUND1(a,b,c,d, 1,0x00, 7) + store(1) + ROUND1(d,a,b,c, 2,0x01,12) + store(2) + ROUND1(c,d,a,b, 3,0x02,17) + store(3) + ROUND1(b,c,d,a, 4,0x03,22) + store(4) + ROUND1(a,b,c,d, 5,0x04, 7) + 
store(5) + ROUND1(d,a,b,c, 6,0x05,12) + store(6) + ROUND1(c,d,a,b, 7,0x06,17) + store(7) + ROUND1(b,c,d,a, 8,0x07,22) + store(8) + ROUND1(a,b,c,d, 9,0x08, 7) + store(9) + ROUND1(d,a,b,c,10,0x09,12) + store(10) + ROUND1(c,d,a,b,11,0x0a,17) + store(11) + ROUND1(b,c,d,a,12,0x0b,22) + store(12) + ROUND1(a,b,c,d,13,0x0c, 7) + store(13) + ROUND1(d,a,b,c,14,0x0d,12) + store(14) + ROUND1(c,d,a,b,15,0x0e,17) + store(15) + ROUND1load(b,c,d,a, 1,0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, 6,0x10, 5) + ROUND2(d,a,b,c,11,0x11, 9) + ROUND2(c,d,a,b, 0,0x12,14) + ROUND2(b,c,d,a, 5,0x13,20) + ROUND2(a,b,c,d,10,0x14, 5) + ROUND2(d,a,b,c,15,0x15, 9) + ROUND2(c,d,a,b, 4,0x16,14) + ROUND2(b,c,d,a, 9,0x17,20) + ROUND2(a,b,c,d,14,0x18, 5) + ROUND2(d,a,b,c, 3,0x19, 9) + ROUND2(c,d,a,b, 8,0x1a,14) + ROUND2(b,c,d,a,13,0x1b,20) + ROUND2(a,b,c,d, 2,0x1c, 5) + ROUND2(d,a,b,c, 7,0x1d, 9) + ROUND2(c,d,a,b,12,0x1e,14) + ROUND2(b,c,d,a, 0,0x1f,20) + + load(5) + VMOVAPD c, tmp + + ROUND3(a,b,c,d, 8,0x20, 4) + ROUND3(d,a,b,c,11,0x21,11) + ROUND3(c,d,a,b,14,0x22,16) + ROUND3(b,c,d,a, 1,0x23,23) + ROUND3(a,b,c,d, 4,0x24, 4) + ROUND3(d,a,b,c, 7,0x25,11) + ROUND3(c,d,a,b,10,0x26,16) + ROUND3(b,c,d,a,13,0x27,23) + ROUND3(a,b,c,d, 0,0x28, 4) + ROUND3(d,a,b,c, 3,0x29,11) + ROUND3(c,d,a,b, 6,0x2a,16) + ROUND3(b,c,d,a, 9,0x2b,23) + ROUND3(a,b,c,d,12,0x2c, 4) + ROUND3(d,a,b,c,15,0x2d,11) + ROUND3(c,d,a,b, 2,0x2e,16) + ROUND3(b,c,d,a, 0,0x2f,23) + + load(0) + VPXOR d, ones, tmp + + ROUND4(a,b,c,d, 7,0x30, 6) + ROUND4(d,a,b,c,14,0x31,10) + ROUND4(c,d,a,b, 5,0x32,15) + ROUND4(b,c,d,a,12,0x33,21) + ROUND4(a,b,c,d, 3,0x34, 6) + ROUND4(d,a,b,c,10,0x35,10) + ROUND4(c,d,a,b, 1,0x36,15) + ROUND4(b,c,d,a, 8,0x37,21) + ROUND4(a,b,c,d,15,0x38, 6) + ROUND4(d,a,b,c, 6,0x39,10) + ROUND4(c,d,a,b,13,0x3a,15) + ROUND4(b,c,d,a, 4,0x3b,21) + ROUND4(a,b,c,d,11,0x3c, 6) + ROUND4(d,a,b,c, 2,0x3d,10) + ROUND4(c,d,a,b, 9,0x3e,15) + ROUND4(b,c,d,a, 0,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 32(dig) + VMOVUPD c, 64(dig) + VMOVUPD d, 96(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go new file mode 100644 index 00000000..16edda26 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block_amd64.go @@ -0,0 +1,210 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "fmt" + "math" + "unsafe" + + "github.com/klauspost/cpuid/v2" +) + +var hasAVX512 bool + +func init() { + // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F. + hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ) +} + +//go:noescape +func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) + +//go:noescape +func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int) + +// 8-way 4x uint32 digests in 4 ymm registers +// (ymm0, ymm1, ymm2, ymm3) +type digest8 struct { + v0, v1, v2, v3 [8]uint32 +} + +// Stack cache for 8x64 byte md5.BlockSize bytes. +// Must be 32-byte aligned, so allocate 512+32 and +// align upwards at runtime. +type cache8 [512 + 32]byte + +// MD5 magic numbers for one lane of hashing; inflated +// 8x below at init time. 
+var md5consts = [64]uint32{ + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, + 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, +} + +// inflate the consts 8-way for 8x md5 (256 bit ymm registers) +var avx256md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 8*len(c)) + for i := range c { + for j := 0; j < 8; j++ { + inf[(i*8)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// 16-way 4x uint32 digests in 4 zmm registers +type digest16 struct { + v0, v1, v2, v3 [16]uint32 +} + +// inflate the consts 16-way for 16x md5 (512 bit zmm registers) +var avx512md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 16*len(c)) + for i := range c { + for j := 0; j < 16; j++ { + inf[(i*16)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// Interface function to assembly code +func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) { + if hasAVX512 { + blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16) + return + } + + // Preparing data using copy is slower since copies aren't inlined. + + // Calculate on this goroutine + if half { + for i := range s.i8[0][:] { + s.i8[0][i] = input[i] + } + for i := range s.d8a.v0[:] { + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + } + blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a) + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + return + } + + for i := range s.i8[0][:] { + s.i8[0][i], s.i8[1][i] = input[i], input[8+i] + } + + for i := range s.d8a.v0[:] { + j := (i + 8) & 15 + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j] + } + + // Benchmarks appears to be slightly faster when spinning up 2 goroutines instead + // of using the current for one of the blocks. 
+ s.wg.Add(2) + go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }() + go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }() + s.wg.Wait() + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + for i := range s.d8b.v0[:] { + j := (i + 8) & 15 + d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] + } +} + +// Interface function to AVX512 assembly code +func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) { + baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) + ptrs := [16]int32{} + + for i := range ptrs { + if len(input[i]) > 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds16(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + + block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1< 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds8(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + var cache cache8 // stack storage for block8 tmp state + block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1< internalBlockSize { + l = internalBlockSize + } + nnn, err := d.write(p[:l]) + if err != nil { + return nn, err + } + nn += nnn + p = p[l:] + + if len(p) == 0 { + break + } + + } + return +} + +func (d *md5Digest) write(p []byte) (nn int, err error) { + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == BlockSize { + // Create a copy of the overflow buffer in order to send it async over the channel + // (since we will modify the overflow buffer down below with any access beyond multiples of 64) + tmp := <-d.buffers + tmp = tmp[:BlockSize] + copy(tmp, d.x[:]) + d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= BlockSize { + n := len(p) &^ (BlockSize - 1) + buf := <-d.buffers + buf = buf[:n] + copy(buf, p) + d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d *md5Digest) Close() { + if d.blocksCh != nil { + close(d.blocksCh) + d.blocksCh = nil + } +} + +var sumChPool sync.Pool + +func init() { + sumChPool.New = func() interface{} { + return make(chan sumResult, 1) + } +} + +// Sum - Return MD5 sum in bytes +func (d 
*md5Digest) Sum(in []byte) (result []byte) { + if d.blocksCh == nil { + panic("sum after close") + } + + trail := <-d.buffers + trail = append(trail[:0], d.x[:d.nx]...) + + length := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if length%64 < 56 { + trail = append(trail, tmp[0:56-length%64]...) + } else { + trail = append(trail, tmp[0:64+56-length%64]...) + } + + // Length in bits. + length <<= 3 + binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits + + trail = append(trail, tmp[0:8]...) + if len(trail)%BlockSize != 0 { + panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx)) + } + sumCh := sumChPool.Get().(chan sumResult) + d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true) + + sum := <-sumCh + sumChPool.Put(sumCh) + + return append(in, sum.digest[:]...) +} + +// sendBlock will send a block for processing. +// If cycle is true we will block on cycle, otherwise we will only block +// if the block channel is full. +func (d *md5Digest) sendBlock(bi blockInput, cycle bool) { + if cycle { + select { + case d.blocksCh <- bi: + d.cycleServer <- d.uid + } + return + } + // Only block on cycle if we filled the buffer + select { + case d.blocksCh <- bi: + return + default: + d.cycleServer <- d.uid + d.blocksCh <- bi + } +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go new file mode 100644 index 00000000..94f741c5 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go @@ -0,0 +1,397 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "encoding/binary" + "fmt" + "runtime" + "sync" + + "github.com/klauspost/cpuid/v2" +) + +// MD5 initialization constants +const ( + // Lanes is the number of concurrently calculated hashes. + Lanes = 16 + + init0 = 0x67452301 + init1 = 0xefcdab89 + init2 = 0x98badcfe + init3 = 0x10325476 + + // Use scalar routine when below this many lanes + useScalarBelow = 3 +) + +// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to +// differentiate with default initialisation value of 0 +const md5ServerUID = Lanes + +const buffersPerLane = 3 + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + sumCh chan sumResult + reset bool +} + +type sumResult struct { + digest [Size]byte +} + +type lanesInfo [Lanes]blockInput + +// md5Server - Type to implement parallel handling of MD5 invocations +type md5Server struct { + uidCounter uint64 + cycle chan uint64 // client with uid has update. + newInput chan newClient // Add new client. + digests map[uint64][Size]byte // Map of uids to (interim) digest results + maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds + maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core) + maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core) + allBufs []byte // Preallocated buffer. + buffers chan []byte // Preallocated buffers, sliced from allBufs. 
+ + i8 [2][8][]byte // avx2 temporary vars + d8a, d8b digest8 + wg sync.WaitGroup +} + +// NewServer - Create new object for parallel processing handling +func NewServer() Server { + if !cpuid.CPU.Supports(cpuid.AVX2) { + return &fallbackServer{} + } + md5srv := &md5Server{} + md5srv.digests = make(map[uint64][Size]byte) + md5srv.newInput = make(chan newClient, Lanes) + md5srv.cycle = make(chan uint64, Lanes*10) + md5srv.uidCounter = md5ServerUID - 1 + md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize) + md5srv.buffers = make(chan []byte, buffersPerLane*Lanes) + // Fill buffers. + for i := 0; i < buffersPerLane*Lanes; i++ { + s := 32 + i*internalBlockSize + md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize] + } + + // Start a single thread for reading from the input channel + go md5srv.process(md5srv.newInput) + return md5srv +} + +type newClient struct { + uid uint64 + input chan blockInput +} + +// process - Sole handler for reading from the input channel. +func (s *md5Server) process(newClients chan newClient) { + // To fill up as many lanes as possible: + // + // 1. Wait for a cycle id. + // 2. If not already in a lane, add, otherwise leave on channel + // 3. Start timer + // 4. Check if lanes is full, if so, goto 10 (process). + // 5. If timeout, goto 10. + // 6. Wait for new id (goto 2) or timeout (goto 10). + // 10. Process. + // 11. Check all input if there is already input, if so add to lanes. + // 12. Goto 1 + + // lanes contains the lanes. + var lanes lanesInfo + // lanesFilled contains the number of filled lanes for current cycle. + var lanesFilled int + // clients contains active clients + var clients = make(map[uint64]chan blockInput, Lanes) + + addToLane := func(uid uint64) { + cl, ok := clients[uid] + if !ok { + // Unknown client. Maybe it was already removed. + return + } + // Check if we already have it. + for _, lane := range lanes[:lanesFilled] { + if lane.uid == uid { + return + } + } + // Continue until we get a block or there is nothing on channel + for { + select { + case block, ok := <-cl: + if !ok { + // Client disconnected + delete(clients, block.uid) + return + } + if block.uid != uid { + panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid)) + } + // If reset message, reset and we're done + if block.reset { + delete(s.digests, uid) + continue + } + + // If requesting sum, we will need to maintain state. + if block.sumCh != nil { + var dig digest + d, ok := s.digests[uid] + if ok { + dig.s[0] = binary.LittleEndian.Uint32(d[0:4]) + dig.s[1] = binary.LittleEndian.Uint32(d[4:8]) + dig.s[2] = binary.LittleEndian.Uint32(d[8:12]) + dig.s[3] = binary.LittleEndian.Uint32(d[12:16]) + } else { + dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3 + } + + sum := sumResult{} + // Add end block to current digest. 
+ blockScalar(&dig.s, block.msg) + + binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0]) + binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1]) + binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2]) + binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3]) + block.sumCh <- sum + if block.msg != nil { + s.buffers <- block.msg + } + continue + } + if len(block.msg) == 0 { + continue + } + lanes[lanesFilled] = block + lanesFilled++ + return + default: + return + } + } + } + addNewClient := func(cl newClient) { + if _, ok := clients[cl.uid]; ok { + panic("internal error: duplicate client registration") + } + clients[cl.uid] = cl.input + } + + allLanesFilled := func() bool { + return lanesFilled == Lanes || lanesFilled >= len(clients) + } + + for { + // Step 1. + for lanesFilled == 0 { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + // Check if it already sent a payload. + addToLane(cl.uid) + continue + case uid := <-s.cycle: + addToLane(uid) + } + } + + fillLanes: + for !allLanesFilled() { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + + case uid := <-s.cycle: + addToLane(uid) + default: + // Nothing more queued... + break fillLanes + } + } + + // If we did not fill all lanes, check if there is more waiting + if !allLanesFilled() { + runtime.Gosched() + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } + if false { + if !allLanesFilled() { + fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients)) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + } else if true { + fmt.Println("all lanes filled") + } + } + // Process the lanes we could collect + s.blocks(lanes[:lanesFilled]) + + // Clear lanes... + lanesFilled = 0 + // Add all current queued + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } +} + +func (s *md5Server) Close() { + if s.newInput != nil { + close(s.newInput) + s.newInput = nil + } +} + +// Invoke assembly and send results back +func (s *md5Server) blocks(lanes []blockInput) { + if len(lanes) < useScalarBelow { + // Use scalar routine when below this many lanes + switch len(lanes) { + case 0: + case 1: + lane := lanes[0] + var d digest + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) > 0 { + // Update... + blockScalar(&d.s, lane.msg) + } + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], d.s[0]) + binary.LittleEndian.PutUint32(dig[4:], d.s[1]) + binary.LittleEndian.PutUint32(dig[8:], d.s[2]) + binary.LittleEndian.PutUint32(dig[12:], d.s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[0] = blockInput{} + + default: + s.wg.Add(len(lanes)) + var results [useScalarBelow]digest + for i := range lanes { + lane := lanes[i] + go func(i int) { + var d digest + defer s.wg.Done() + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) == 0 { + results[i] = d + return + } + // Update... 
+ blockScalar(&d.s, lane.msg) + results[i] = d + }(i) + } + s.wg.Wait() + for i, lane := range lanes { + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], results[i].s[0]) + binary.LittleEndian.PutUint32(dig[4:], results[i].s[1]) + binary.LittleEndian.PutUint32(dig[8:], results[i].s[2]) + binary.LittleEndian.PutUint32(dig[12:], results[i].s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } + } + return + } + + inputs := [16][]byte{} + for i := range lanes { + inputs[i] = lanes[i].msg + } + + // Collect active digests... + state := s.getDigests(lanes) + // Process all lanes... + s.blockMd5_x16(&state, inputs, len(lanes) <= 8) + + for i, lane := range lanes { + uid := lane.uid + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], state.v0[i]) + binary.LittleEndian.PutUint32(dig[4:], state.v1[i]) + binary.LittleEndian.PutUint32(dig[8:], state.v2[i]) + binary.LittleEndian.PutUint32(dig[12:], state.v3[i]) + + s.digests[uid] = dig + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } +} + +func (s *md5Server) getDigests(lanes []blockInput) (d digest16) { + for i, lane := range lanes { + a, ok := s.digests[lane.uid] + if ok { + d.v0[i] = binary.LittleEndian.Uint32(a[0:4]) + d.v1[i] = binary.LittleEndian.Uint32(a[4:8]) + d.v2[i] = binary.LittleEndian.Uint32(a[8:12]) + d.v3[i] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.v0[i] = init0 + d.v1[i] = init1 + d.v2[i] = init2 + d.v3[i] = init3 + } + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go new file mode 100644 index 00000000..7814dada --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go @@ -0,0 +1,12 @@ +//+build !amd64 appengine !gc noasm + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// NewServer - Create new object for parallel processing handling +func NewServer() *fallbackServer { + return &fallbackServer{} +} diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go new file mode 100644 index 00000000..73981b0e --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go @@ -0,0 +1,85 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type digest struct { + s [4]uint32 +} + +// Helper struct for generating number of rounds in combination with mask for valid lanes +type maskRounds struct { + mask uint64 + rounds uint64 +} + +func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [8]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... 
+ if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} + +func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... + if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xffff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go new file mode 100644 index 00000000..11b0cb96 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5.go @@ -0,0 +1,63 @@ +package md5simd + +import ( + "crypto/md5" + "hash" + "sync" +) + +const ( + // The blocksize of MD5 in bytes. + BlockSize = 64 + + // The size of an MD5 checksum in bytes. + Size = 16 + + // internalBlockSize is the internal block size. + internalBlockSize = 32 << 10 +) + +type Server interface { + NewHash() Hasher + Close() +} + +type Hasher interface { + hash.Hash + Close() +} + +// StdlibHasher returns a Hasher that uses the stdlib for hashing. +// Used hashers are stored in a pool for fast reuse. +func StdlibHasher() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +// md5Wrapper is a wrapper around the builtin hasher. +type md5Wrapper struct { + hash.Hash +} + +var md5Pool = sync.Pool{New: func() interface{} { + return md5.New() +}} + +// fallbackServer - Fallback when no assembly is available. +type fallbackServer struct { +} + +// NewHash -- return regular Golang md5 hashing from crypto +func (s *fallbackServer) NewHash() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +func (s *fallbackServer) Close() { +} + +func (m *md5Wrapper) Close() { + if m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + m.Hash = nil + } +} diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go new file mode 100644 index 00000000..4c279366 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go @@ -0,0 +1,11 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. 
+ +// +build !appengine +// +build !noasm +// +build gc + +package md5simd + +// Encode p to digest +//go:noescape +func blockScalar(dig *[4]uint32, p []byte) diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s new file mode 100644 index 00000000..fbc4a21f --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s @@ -0,0 +1,714 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. + +// +build !appengine +// +build !noasm +// +build gc + +// func blockScalar(dig *[4]uint32, p []byte) +TEXT ·blockScalar(SB), $0-32 + MOVQ p_len+16(FP), AX + MOVQ dig+0(FP), CX + MOVQ p_base+8(FP), DX + SHRQ $0x06, AX + SHLQ $0x06, AX + LEAQ (DX)(AX*1), AX + CMPQ DX, AX + JEQ end + MOVL (CX), BX + MOVL 4(CX), BP + MOVL 8(CX), SI + MOVL 12(CX), CX + MOVL $0xffffffff, DI + +loop: + MOVL (DX), R8 + MOVL CX, R9 + MOVL BX, R10 + MOVL BP, R11 + MOVL SI, R12 + MOVL CX, R13 + + // ROUND1 + XORL SI, R9 + ADDL $0xd76aa478, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 4(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xe8c7b756, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 8(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0x242070db, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 12(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xc1bdceee, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 16(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0xf57c0faf, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 20(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x4787c62a, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 24(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa8304613, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 28(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xfd469501, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 32(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x698098d8, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 36(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x8b44f7af, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 40(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xffff5bb1, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 44(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x895cd7be, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 48(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x6b901122, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 52(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xfd987193, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 56(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa679438e, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 60(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x49b40821, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 4(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + + // ROUND2 + MOVL CX, R9 + MOVL CX, 
R14 + XORL DI, R9 + ADDL $0xf61e2562, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 24(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc040b340, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 44(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x265e5a51, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL (DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe9b6c7aa, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xd62f105d, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 40(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0x02441453, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 60(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xd8a1e681, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 16(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe7d3fbc8, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 36(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0x21e1cde6, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 56(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc33707d6, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 12(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xf4d50d87, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 32(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x455a14ed, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 52(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xa9e3e905, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 8(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xfcefa3f8, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 28(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x676f02d9, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 48(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x8d2a4c8a, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + + // ROUND3 + MOVL SI, R9 + ADDL $0xfffa3942, BX + ADDL R8, BX + MOVL 32(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x8771f681, CX + ADDL R8, CX + MOVL 44(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x6d9d6122, SI + ADDL R8, SI + MOVL 56(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xfde5380c, BP + ADDL R8, BP + MOVL 4(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL 
$0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xa4beea44, BX + ADDL R8, BX + MOVL 16(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x4bdecfa9, CX + ADDL R8, CX + MOVL 28(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xf6bb4b60, SI + ADDL R8, SI + MOVL 40(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xbebfbc70, BP + ADDL R8, BP + MOVL 52(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0x289b7ec6, BX + ADDL R8, BX + MOVL (DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xeaa127fa, CX + ADDL R8, CX + MOVL 12(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xd4ef3085, SI + ADDL R8, SI + MOVL 24(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0x04881d05, BP + ADDL R8, BP + MOVL 36(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xd9d4d039, BX + ADDL R8, BX + MOVL 48(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xe6db99e5, CX + ADDL R8, CX + MOVL 60(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x1fa27cf8, SI + ADDL R8, SI + MOVL 8(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xc4ac5665, BP + ADDL R8, BP + MOVL (DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + + // ROUND4 + MOVL DI, R9 + XORL CX, R9 + ADDL $0xf4292244, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 28(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x432aff97, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 56(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xab9423a7, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 20(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xfc93a039, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 48(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x655b59c3, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 12(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x8f0ccc92, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 40(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xffeff47d, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 4(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x85845dd1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 32(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x6fa87e4f, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 60(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xfe2ce6e0, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 24(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xa3014314, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 52(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x4e0811a1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL 
R9, BP + MOVL 16(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0xf7537e82, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 44(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xbd3af235, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 8(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0x2ad7d2bb, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 36(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xeb86d391, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + ROLL $0x15, BP + ADDL SI, BP + ADDL R10, BX + ADDL R11, BP + ADDL R12, SI + ADDL R13, CX + + // Prepare next loop + ADDQ $0x40, DX + CMPQ DX, AX + JB loop + + // Write output + MOVQ dig+0(FP), AX + MOVL BX, (AX) + MOVL BP, 4(AX) + MOVL SI, 8(AX) + MOVL CX, 12(AX) + +end: + RET diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore new file mode 100644 index 00000000..8081bd0f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -0,0 +1,4 @@ +*~ +*.test +validator +golangci-lint \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml new file mode 100644 index 00000000..dfc0c2d5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + misspell: + locale: US + +linters: + disable-all: true + enable: + - typecheck + - goimports + - misspell + - revive + - govet + - ineffassign + - gosimple + - deadcode + - structcheck + - gocritic + +issues: + exclude-use-default: false + exclude: + # todo fix these when we get enough time. + - "singleCaseSwitch: should rewrite switch statement to if statement" + - "unlambda: replace" + - "captLocal:" + - "ifElseChain:" + - "elseif:" diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME new file mode 100644 index 00000000..d365a7bb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CNAME @@ -0,0 +1 @@ +minio-go.min.io \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md new file mode 100644 index 00000000..24522ef7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md @@ -0,0 +1,22 @@ +### Developer Guidelines + +``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: + +* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request + +* When you're ready to create a pull request, be sure to: + - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. + NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables + ``ACCESS_KEY`` and ``SECRET_KEY``. 
To run shorter version of the tests please use ``go test -short -race ./...`` + +* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 00000000..f640dfb9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+Tag and sign your release commit. Note that this step requires access to MinIO's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+      libraryVersion = "4.0.1"
+```
+
+Commit your changes:
+```sh
+$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`: Highlights is a bulleted list of the salient features in the release, and Changelog is a list of all commits since the last release.
+
+To generate the `changelog`:
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' ..
+```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 00000000..ac4a328f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,35 @@
+GOPATH := $(shell go env GOPATH)
+TMPDIR := $(shell mktemp -d)
+
+all: checks
+
+.PHONY: examples docs
+
+checks: lint vet test examples functional-test
+
+lint:
+	@mkdir -p ${GOPATH}/bin
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
+	@echo "Running $@ check"
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+
+vet:
+	@GO111MODULE=on go vet ./...
+
+test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
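+
+# NOTE: the test target above and functional-test below assume a MinIO server
+# is already running at localhost:9000 with credentials minio/minio123 and
+# HTTPS enabled; adjust the SERVER_ENDPOINT, ACCESS_KEY and SECRET_KEY
+# variables to point at a different server.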
+
+examples:
+	@echo "Building s3 examples"
+	@cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+	@echo "Building minio examples"
+	@cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+
+functional-test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
+
+clean:
+	@echo "Cleaning up all the generated files"
+	@find . -name '*.test' | xargs rm -fv
+	@find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 00000000..1e8fd3b9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
+MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
+
+This product includes software developed at MinIO, Inc.
+(https://min.io/).
+
+The MinIO project contains unmodified/modified subcomponents too with
+separate copyright notices and license terms. Your use of the source
+code for these subcomponents is subject to the terms and conditions
+of Apache License Version 2.0
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 00000000..4e3abf71
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,246 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+
+This quickstart guide shows how to install the MinIO client SDK, connect to MinIO, and walks through a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
+
+## Download from GitHub
+```sh
+go get github.com/minio/minio-go/v7
+```
+
+## Initialize MinIO Client
+The MinIO client requires the following two parameters to connect to an Amazon S3 compatible object storage:
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL of the object storage service. |
+| _minio.Options_ | All the options, such as credentials and a custom transport. |
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example program connects to an object storage server, creates a bucket, and uploads a file to the bucket.
+
+We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. The access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	ctx := context.Background()
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a new bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		// Check to see if we already own this bucket (which happens if you run this twice)
+		exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
+		if errBucketExists == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	} else {
+		log.Printf("Successfully created %s\n", bucketName)
+	}
+
+	// Details of the zip file to upload.
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject
+	info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
+}
+```
+
+### Run FileUploader
+```sh
+go run FileUploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here.
+ +* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) + +### API Reference : Bucket Operations +* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) + +### API Reference : Bucket policy Operations +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) + +### API Reference : Bucket notification Operations +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) + +### API Reference : File Object Operations +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) + +### API Reference : Object Operations +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) + + +### API Reference : Presigned Operations +* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) + +### API Reference : Client custom settings +* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) + +## Full Examples + +### Full Examples : Bucket Operations +* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +* 
[listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +### Full Examples : Bucket policy Operations +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +### Full Examples : Bucket lifecycle Operations +* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + +### Full Examples : Bucket encryption Operations +* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) +* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) +* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) + +### Full Examples : Bucket replication Operations +* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) +* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) +* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) + +### Full Examples : Bucket notification Operations +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) +* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) + +### Full Examples : File Object Operations +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) + +### Full Examples : Object Operations +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +* 
[removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+## License
+This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0); see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md
new file mode 100644
index 00000000..64e79341
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md
@@ -0,0 +1,260 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage:**
+
+- AWS Signature Version 4
+   - Amazon S3
+   - MinIO
+
+- AWS Signature Version 2
+   - Google Cloud Storage (Compatibility Mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
+This guide covers how to install the MinIO client SDK and connect to MinIO, and walks through a file upload example. For the complete list of APIs and examples, see the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This guide assumes that you already have a working [Go development environment](https://golang.org/doc/install).
+
+## Download from GitHub
+```sh
+go get github.com/minio/minio-go/v7
+```
+
+## Initialize MinIO Client
+The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL of the object storage service |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password of your account. |
+| secure | Set to true to use HTTPS |
+
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
+
+The example uses the MinIO server running at [https://play.min.io](https://play.min.io); feel free to use this service for development and testing. The access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	ctx := context.Background()
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		// Check whether the bucket already exists.
+		exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
+		if errBucketExists == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	} else {
+		log.Printf("Successfully created %s\n", bucketName)
+	}
+
+	// Upload a zip file.
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject; in v7 it returns an UploadInfo,
+	// so the uploaded size is reported via info.Size.
+	info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
+}
+```
+
+### Run FileUploader
+```sh
+go run FileUploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here.
+* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
+* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
+* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject)
+
+### API Reference : Object Operations
+* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
+* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
+* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket lifecycle Operations
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
+### Full Examples : Bucket encryption Operations
+* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
+* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
+* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
+
+### Full Examples : Bucket replication Operations
+* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
+* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
+* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
+* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 00000000..24f94e03
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/sse" +) + +// SetBucketEncryption sets the default encryption configuration on an existing bucket. +func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if config == nil { + return errInvalidArgument("configuration cannot be empty") + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. +func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // DELETE default encryption configuration on a bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketEncryption gets the default encryption configuration +// on an existing bucket with a context to control cancellations and timeouts. +func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Execute GET on bucket to get the default encryption configuration. 
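+	// On AWS S3 a bucket without a default encryption configuration returns
+	// a ServerSideEncryptionConfigurationNotFoundError for this call; callers
+	// can treat that error code as "no default encryption set".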
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + encryptionConfig := &sse.Configuration{} + if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { + return nil, err + } + + return encryptionConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go new file mode 100644 index 00000000..7e219973 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -0,0 +1,147 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/lifecycle" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketLifecycle set the lifecycle on an existing bucket. +func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if config.Empty() { + return c.removeBucketLifecycle(ctx, bucketName) + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(ctx, bucketName, buf) +} + +// Saves a new bucket lifecycle. +func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. 
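+	// (The empty-valued "lifecycle" query parameter scopes this DELETE to the
+	// bucket's lifecycle subresource; no object name is actually involved.)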
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketLifecycle fetch bucket lifecycle configuration +func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName) + if err != nil { + return nil, err + } + + config := lifecycle.NewConfiguration() + if err = xml.Unmarshal(bucketLifecycle, config); err != nil { + return nil, err + } + return config, nil +} + +// Request server for current bucket lifecycle. +func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go new file mode 100644 index 00000000..dc37b0c0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -0,0 +1,254 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bufio" + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. +func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. 
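+	// The empty-valued "notification" query parameter below scopes the
+	// request to the bucket's notification subresource (?notification).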
+ urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(&config) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), + } + + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) +} + +// GetBucketNotification returns current bucket notification configuration +func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return notification.Configuration{}, err + } + return c.getBucketNotification(ctx, bucketName) +} + +// Request server for notification rules. +func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return notification.Configuration{}, err + } + return processBucketNotificationResponse(bucketName, resp) +} + +// processes the GetNotification http response from the server. +func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return notification.Configuration{}, errResponse + } + var bucketNotification notification.Configuration + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return notification.Configuration{}, err + } + return bucketNotification, nil +} + +// ListenNotification listen for all events, this is a MinIO specific API +func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { + return c.ListenBucketNotification(ctx, "", prefix, suffix, events) +} + +// ListenBucketNotification listen for bucket events, this is a MinIO specific API +func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { + notificationInfoCh := make(chan notification.Info, 1) + const notificationCapacity = 4 * 1024 * 1024 + notificationEventBuffer := make([]byte, notificationCapacity) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- notification.Info) { + defer close(notificationInfoCh) + + // Validate the bucket name. 
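+		// An empty bucket name is allowed here: ListenNotification passes ""
+		// to subscribe to events from all buckets (a MinIO-specific extension).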
+ if bucketName != "" { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { + select { + case notificationInfoCh <- notification.Info{ + Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), + }: + case <-ctx.Done(): + } + return + } + + // Continuously run and listen on bucket notification. + // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Prepare urlValues to pass into the request on every loop + urlValues := make(url.Values) + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + select { + case notificationInfoCh <- notification.Info{ + Err: errResponse, + }: + case <-ctx.Done(): + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Use a higher buffer to support unexpected + // caching done by proxies + bio.Buffer(notificationEventBuffer, notificationCapacity) + json := jsoniter.ConfigCompatibleWithStandardLibrary + + // Unmarshal each line, returns marshaled values. + for bio.Scan() { + var notificationInfo notification.Info + if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { + // Unexpected error during json unmarshal, send + // the error to caller for actionable as needed. + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + closeResponse(resp) + continue + } + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-ctx.Done(): + closeResponse(resp) + return + } + } + + if err = bio.Err(); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + } + + // Close current connection before looping further. + closeResponse(resp) + + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go new file mode 100644 index 00000000..e7edf9c9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -0,0 +1,147 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketPolicy sets the access permissions on an existing bucket. +func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If policy is empty then delete the bucket policy. + if policy == "" { + return c.removeBucketPolicy(ctx, bucketName) + } + + // Save the updated policies. + return c.putBucketPolicy(ctx, bucketName, policy) +} + +// Saves a new bucket policy. +func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: strings.NewReader(policy), + contentLength: int64(len(policy)), + } + + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketPolicy returns the current policy +func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return "", nil + } + return "", err + } + return bucketPolicy, nil +} + +// Request server for current bucket policy. +func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. 
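+	// (The "policy" query parameter scopes this GET to the bucket policy
+	// subresource rather than an object listing.)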
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go new file mode 100644 index 00000000..461984e3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -0,0 +1,288 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RemoveBucketReplication removes a replication config on an existing bucket. +func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + return c.removeBucketReplication(ctx, bucketName) +} + +// SetBucketReplication sets a replication config on an existing bucket. +func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If replication is empty then delete it. + if cfg.Empty() { + return c.removeBucketReplication(ctx, bucketName) + } + // Save the updated replication. + return c.putBucketReplication(ctx, bucketName, cfg) +} + +// Saves a new bucket replication. +func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + replication, err := xml.Marshal(cfg) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(replication), + contentLength: int64(len(replication)), + contentMD5Base64: sumMD5Base64(replication), + } + + // Execute PUT to upload a new bucket replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// Remove replication from a bucket. 
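+// The DELETE below targets the bucket's "replication" subresource; the
+// response status is not checked, so only transport-level errors surface.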
+func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication", "")
+
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetBucketReplication fetches bucket replication configuration. If config is not
+// found, returns empty config with nil error.
+func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return cfg, err
+ }
+ bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "ReplicationConfigurationNotFoundError" {
+ return cfg, nil
+ }
+ return cfg, err
+ }
+ return bucketReplicationCfg, nil
+}
+
+// Request server for current bucket replication config.
+func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication", "")
+
+ // Execute GET on bucket to get replication config.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return cfg, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return cfg, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = xmlDecoder(resp.Body, &cfg); err != nil {
+ return cfg, err
+ }
+
+ return cfg, nil
+}
+
+// GetBucketReplicationMetrics fetches bucket replication status metrics.
+func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return s, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-metrics", "")
+
+ // Execute GET on bucket to get replication metrics.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return s, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return s, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ respBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return s, err
+ }
+
+ if err := json.Unmarshal(respBytes, &s); err != nil {
+ return s, err
+ }
+ return s, nil
+}
+
+// mustGetUUID - get a random UUID.
+func mustGetUUID() string {
+ u, err := uuid.NewRandom()
+ if err != nil {
+ return ""
+ }
+ return u.String()
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) {
+ rID = mustGetUUID()
+ _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID)
+ if err != nil {
+ return rID, err
+ }
+ return rID, nil
+}
+
+// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if
+// ExistingObjectReplication is enabled in the replication config
+func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) {
+ return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID())
+}
+
+// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config
+func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-reset", "")
+ if olderThan > 0 {
+ urlValues.Set("older-than", olderThan.String())
+ }
+ if tgtArn != "" {
+ urlValues.Set("arn", tgtArn)
+ }
+ urlValues.Set("reset-id", resetID)
+ // Execute PUT on bucket to reset replication.
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return rinfo, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return rinfo, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
+ return rinfo, err
+ }
+ return rinfo, nil
+}
+
+// GetBucketReplicationResyncStatus gets the status of replication resync
+func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return rinfo, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-reset-status", "")
+ if arn != "" {
+ urlValues.Set("arn", arn)
+ }
+ // Execute GET on bucket to get replication resync status.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return rinfo, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return rinfo, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil {
+ return rinfo, err
+ }
+ return rinfo, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
new file mode 100644
index 00000000..1615f8f8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -0,0 +1,135 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// GetBucketTagging fetches tagging configuration for a bucket with a
+// context to control cancellations and timeouts.
+func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ // Execute GET on bucket to get tagging configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ defer io.Copy(ioutil.Discard, resp.Body)
+ return tags.ParseBucketXML(resp.Body)
+}
+
+// SetBucketTagging sets tagging configuration for a bucket
+// with a context to control cancellations and timeouts.
+func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if tags == nil {
+ return errors.New("nil tags passed")
+ }
+
+ buf, err := xml.Marshal(tags)
+ if err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("tagging", "")
+
+ // Content-length is mandatory to set a tagging configuration
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(buf),
+ contentLength: int64(len(buf)),
+ contentMD5Base64: sumMD5Base64(buf),
+ }
+
+ // Execute PUT on bucket to put tagging configuration.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketTagging removes tagging configuration for a +// bucket with a context to control cancellations and timeouts. +func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute DELETE on bucket to remove tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go new file mode 100644 index 00000000..930b1b93 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -0,0 +1,137 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketVersioning sets a bucket versioning configuration +func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + contentSHA256Hex: sum256Hex(buf), + } + + // Execute PUT to set a bucket versioning. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// EnableVersioning - enable object versioning in given bucket. 
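+//
+// A minimal usage sketch (the client value and bucket name here are
+// hypothetical):
+//
+// if err := client.EnableVersioning(context.Background(), "my-bucket"); err != nil {
+// log.Fatalln(err)
+// }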
+func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) +} + +// SuspendVersioning - suspend object versioning in given bucket. +func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) +} + +// BucketVersioningConfiguration is the versioning configuration structure +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` + MFADelete string `xml:"MfaDelete,omitempty"` +} + +// Various supported states +const ( + Enabled = "Enabled" + // Disabled State = "Disabled" only used by MFA Delete not supported yet. + Suspended = "Suspended" +) + +// Enabled returns true if bucket versioning is enabled +func (b BucketVersioningConfiguration) Enabled() bool { + return b.Status == Enabled +} + +// Suspended returns true if bucket versioning is suspended +func (b BucketVersioningConfiguration) Suspended() bool { + return b.Status == Suspended +} + +// GetBucketVersioning gets the versioning configuration on +// an existing bucket with a context to control cancellations and timeouts. +func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return BucketVersioningConfiguration{}, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + // Execute GET on bucket to get the versioning configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return BucketVersioningConfiguration{}, err + } + + if resp.StatusCode != http.StatusOK { + return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") + } + + versioningConfig := BucketVersioningConfiguration{} + if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { + return versioningConfig, err + } + + return versioningConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go new file mode 100644 index 00000000..b59924a3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -0,0 +1,592 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
+type CopyDestOptions struct {
+ Bucket string // points to destination bucket
+ Object string // points to destination object
+
+ // `Encryption` is the key info for server-side-encryption with customer
+ // provided key. If it is nil, no encryption is performed.
+ Encryption encrypt.ServerSide
+
+ // `userMeta` is the user-metadata key-value pairs to be set on the
+ // destination. The keys are automatically prefixed with `x-amz-meta-`
+ // if needed. If nil is passed, and if only a single source (of any
+ // size) is provided in the ComposeObject call, then metadata from the
+ // source is copied to the destination.
+ // If no user-metadata is provided, it is copied from the source
+ // (when there is only one source object in the compose request).
+ UserMetadata map[string]string
+ // UserMetadata is only set on the destination if ReplaceMetadata is true;
+ // otherwise UserMetadata is ignored and src.UserMetadata is preserved.
+ // NOTE: if you set this value to true and no metadata is present
+ // in UserMetadata, your destination object will not have any metadata
+ // set.
+ ReplaceMetadata bool
+
+ // `userTags` is the user defined object tags to be set on destination.
+ // This will be set only if the `replaceTags` field is set to true.
+ // Otherwise this field is ignored
+ UserTags map[string]string
+ ReplaceTags bool
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ LegalHold LegalHoldStatus
+
+ // Object Retention related fields
+ Mode RetentionMode
+ RetainUntilDate time.Time
+
+ Size int64 // Needs to be specified if progress bar is specified.
+ // Progress of the entire copy operation will be sent here.
+ Progress io.Reader
+}
+
+// Process custom-metadata to remove a `x-amz-meta-` prefix if
+// present and validate that keys are distinct (after this
+// prefix removal).
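+//
+// For illustration: {"x-amz-meta-alpha": "1", "beta": "2"} becomes
+// {"alpha": "1", "beta": "2"}; if both "x-amz-meta-alpha" and "alpha"
+// are present, whichever key the (unordered) map iteration visits
+// first wins.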
+func filterCustomMeta(userMeta map[string]string) map[string]string {
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ continue
+ }
+ m[k] = v
+ }
+ return m
+}
+
+// Marshal converts all the CopyDestOptions into their
+// equivalent HTTP header representation
+func (opts CopyDestOptions) Marshal(header http.Header) {
+ const replaceDirective = "REPLACE"
+ if opts.ReplaceTags {
+ header.Set(amzTaggingHeaderDirective, replaceDirective)
+ if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
+ header.Set(amzTaggingHeader, tags)
+ }
+ }
+
+ if opts.LegalHold != LegalHoldStatus("") {
+ header.Set(amzLegalHoldHeader, opts.LegalHold.String())
+ }
+
+ if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
+ header.Set(amzLockMode, opts.Mode.String())
+ header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
+ }
+
+ if opts.Encryption != nil {
+ opts.Encryption.Marshal(header)
+ }
+
+ if opts.ReplaceMetadata {
+ header.Set("x-amz-metadata-directive", replaceDirective)
+ for k, v := range filterCustomMeta(opts.UserMetadata) {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+ header.Set(k, v)
+ } else {
+ header.Set("x-amz-meta-"+k, v)
+ }
+ }
+ }
+}
+
+// validate checks that the destination bucket, object and progress
+// options are well-formed.
+func (opts CopyDestOptions) validate() (err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Progress != nil && opts.Size < 0 {
+ return errInvalidArgument("For progress bar effective size needs to be specified")
+ }
+ return nil
+}
+
+// CopySrcOptions represents a source object to be copied, using
+// server-side copying APIs.
+type CopySrcOptions struct {
+ Bucket, Object string
+ VersionID string
+ MatchETag string
+ NoMatchETag string
+ MatchModifiedSince time.Time
+ MatchUnmodifiedSince time.Time
+ MatchRange bool
+ Start, End int64
+ Encryption encrypt.ServerSide
+}
+
+// Marshal converts all the CopySrcOptions into their
+// equivalent HTTP header representation
+func (opts CopySrcOptions) Marshal(header http.Header) {
+ // Set the source header
+ header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
+ if opts.VersionID != "" {
+ header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
+ }
+
+ if opts.MatchETag != "" {
+ header.Set("x-amz-copy-source-if-match", opts.MatchETag)
+ }
+ if opts.NoMatchETag != "" {
+ header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
+ }
+
+ if !opts.MatchModifiedSince.IsZero() {
+ header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
+ }
+ if !opts.MatchUnmodifiedSince.IsZero() {
+ header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
+ }
+
+ if opts.Encryption != nil {
+ encrypt.SSECopy(opts.Encryption).Marshal(header)
+ }
+}
+
+func (opts CopySrcOptions) validate() (err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Start > opts.End || opts.Start < 0 {
+ return errInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ return nil
+}
+
+// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
+func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
+) (ObjectInfo, error) {
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+ if !dstOpts.Internal.ReplicationStatus.Empty() {
+ headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
+ }
+ if !dstOpts.Internal.SourceMTime.IsZero() {
+ headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
+ }
+ if dstOpts.Internal.SourceETag != "" {
+ headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
+ }
+ if dstOpts.Internal.ReplicationRequest {
+ headers.Set(minIOBucketReplicationRequest, "")
+ }
+ if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.RetentionTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.TaggingTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+ }
+
+ if len(dstOpts.UserTags) != 0 {
+ headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ }
+ if dstOpts.Internal.SourceVersionID != "" {
+ if dstOpts.Internal.SourceVersionID != nullVersionID {
+ if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
+ return ObjectInfo{}, errInvalidArgument(err.Error())
+ }
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
+ reqMetadata.queryValues = urlValues
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+ if srcOpts.VersionID != "" {
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
+ }
+ // Send the copy request
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + +func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, + partID int, startOffset int64, length int64, metadata map[string]string, +) (p CompletePart, err error) { + headers := make(http.Header) + + // Set source + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + if startOffset < 0 { + return p, errInvalidArgument("startOffset must be non-negative") + } + + if length >= 0 { + headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } + + for k, v := range metadata { + headers.Set(k, v) + } + + queryValues := make(url.Values) + queryValues.Set("partNumber", strconv.Itoa(partID)) + queryValues.Set("uploadId", uploadID) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + queryValues: queryValues, + }) + defer closeResponse(resp) + if err != nil { + return + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, destBucket, destObject) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partID, cpObjRes.ETag + return p, nil +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header, +) (p CompletePart, err error) { + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObject - creates an object using server-side copying +// of existing objects. It takes a list of source objects (with optional offsets) +// and concatenates them into a new object using only server-side copying +// operations. Optionally takes progress reader hook for applications to +// look at current progress. 
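+//
+// A minimal usage sketch (client, bucket and object names are
+// hypothetical):
+//
+// dst := CopyDestOptions{Bucket: "dst-bucket", Object: "merged.bin"}
+// srcs := []CopySrcOptions{
+// {Bucket: "src-bucket", Object: "part-1"},
+// {Bucket: "src-bucket", Object: "part-2"},
+// }
+// info, err := client.ComposeObject(context.Background(), dst, srcs...)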
+func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
+ }
+
+ for _, src := range srcs {
+ if err := src.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+ }
+
+ if err := dst.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcObjectInfos := make([]ObjectInfo, len(srcs))
+ srcObjectSizes := make([]int64, len(srcs))
+ var totalSize, totalParts int64
+ var err error
+ for i, src := range srcs {
+ opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
+ srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcCopySize := srcObjectInfos[i].Size
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.MatchRange {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.End >= srcCopySize || src.Start < 0 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.Start, src.End, srcCopySize))
+ }
+ srcCopySize = src.End - src.Start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if srcCopySize < absMinPartSize && i < len(srcs)-1 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
+ }
+
+ // Is data to copy too large?
+ totalSize += srcCopySize
+ if totalSize > maxMultipartPutObjectSize {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcObjectSizes[i] = srcCopySize
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(srcCopySize)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is being copied wholly and at most 5GiB in
+ // size, empty files are also supported).
+ if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
+ return c.CopyObject(ctx, dst, srcs[0])
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Ensure that the object has not been changed while
+ // we are copying data.
+ for i, src := range srcs {
+ src.MatchETag = srcObjectInfos[i].ETag
+ }
+
+ // 2. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified and there is only one source,
+ // then (and only then) metadata from the source is copied.
+ var userMeta map[string]string + if dst.ReplaceMetadata { + userMeta = dst.UserMetadata + } else { + userMeta = srcObjectInfos[0].UserMetadata + } + + var userTags map[string]string + if dst.ReplaceTags { + userTags = dst.UserTags + } else { + userTags = srcObjectInfos[0].UserTags + } + + uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ + ServerSideEncryption: dst.Encryption, + UserMetadata: userMeta, + UserTags: userTags, + Mode: dst.Mode, + RetainUntilDate: dst.RetainUntilDate, + LegalHold: dst.LegalHold, + }) + if err != nil { + return UploadInfo{}, err + } + + // 3. Perform copy part uploads + objParts := []CompletePart{} + partIndex := 1 + for i, src := range srcs { + h := make(http.Header) + src.Marshal(h) + if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { + dst.Encryption.Marshal(h) + } + + // calculate start/end indices of parts after + // splitting. + startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) + for j, start := range startIdx { + end := endIdx[j] + + // Add (or reset) source range header for + // upload part copy request. + h.Set("x-amz-copy-source-range", + fmt.Sprintf("bytes=%d-%d", start, end)) + + // make upload-part-copy request + complPart, err := c.uploadPartCopy(ctx, dst.Bucket, + dst.Object, uploadID, partIndex, h) + if err != nil { + return UploadInfo{}, err + } + if dst.Progress != nil { + io.CopyN(ioutil.Discard, dst.Progress, end-start+1) + } + objParts = append(objParts, complPart) + partIndex++ + } + } + + // 4. Make final complete-multipart request. + uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, + completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalSize + return uploadInfo, nil +} + +// partsRequired is maximum parts possible with +// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) +func partsRequired(size int64) int64 { + maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) + r := size / int64(maxPartSize) + if size%int64(maxPartSize) > 0 { + r++ + } + return r +} + +// calculateEvenSplits - computes splits for a source and returns +// start and end index slices. Splits happen evenly to be sure that no +// part is less than 5MiB, as that could fail the multipart request if +// it is not the last part. 
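+//
+// For illustration, with hypothetical numbers: a source of size 10
+// split into 3 parts gives quot=3, rem=1, i.e. one part of 4 bytes
+// followed by two parts of 3 bytes, so startIndex=[0 4 7] and
+// endIndex=[3 6 9].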
+func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { + if size == 0 { + return + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.Start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return +} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go new file mode 100644 index 00000000..1c0ad2be --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "io/ioutil" + "net/http" +) + +// CopyObject - copy a source object into a new object +func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + header := make(http.Header) + dst.Marshal(header) + src.Marshal(header) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: dst.Bucket, + objectName: dst.Object, + customHeader: header, + }) + if err != nil { + return UploadInfo{}, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) + } + + // Update the progress properly after successful copy. 
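+ // Server-side copy transfers no object data through the client, so
+ // the attached progress reader (if any) is drained by dst.Size bytes
+ // in one step here to advance progress reporting to completion.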
+ if dst.Progress != nil {
+ io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
+ }
+
+ cpObjRes := copyObjectResult{}
+ if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+ return UploadInfo{
+ Bucket: dst.Bucket,
+ Key: dst.Object,
+ LastModified: cpObjRes.LastModified,
+ ETag: trimEtag(resp.Header.Get("ETag")),
+ VersionID: resp.Header.Get(amzVersionID),
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
new file mode 100644
index 00000000..2f5912f3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -0,0 +1,177 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "time"
+)
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+ // The name of the bucket.
+ Name string `json:"name"`
+ // Date the bucket was created.
+ CreationDate time.Time `json:"creationDate"`
+}
+
+// StringMap represents map with custom UnmarshalXML
+type StringMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this function is on the pointer of StringMap is important, so
+// that if m is nil it can be initialized, which is often the case if m is
+// nested in another xml structure. This is also why the first thing done
+// on the first line is to initialize it.
+func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ *m = StringMap{}
+ type xmlMapEntry struct {
+ XMLName xml.Name
+ Value string `xml:",chardata"`
+ }
+ for {
+ var e xmlMapEntry
+ err := d.Decode(&e)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ (*m)[e.XMLName.Local] = e.Value
+ }
+ return nil
+}
+
+// Owner name.
+type Owner struct {
+ XMLName xml.Name `xml:"Owner" json:"owner"`
+ DisplayName string `xml:"ID" json:"name"`
+ ID string `xml:"DisplayName" json:"id"`
+}
+
+// UploadInfo contains information about the
+// newly uploaded or copied object.
+type UploadInfo struct {
+ Bucket string
+ Key string
+ ETag string
+ Size int64
+ LastModified time.Time
+ Location string
+ VersionID string
+
+ // Lifecycle expiry-date and ruleID associated with the expiry
+ // not to be confused with `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+}
+
+// RestoreInfo contains information of the restore operation of an archived object
+type RestoreInfo struct {
+ // Whether the restore operation is still ongoing
+ OngoingRestore bool
+ // When the restored copy of the archived object will be removed
+ ExpiryTime time.Time
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+ // An ETag is optionally set to md5sum of an object. In case of multipart objects,
+ // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
+ // each parts concatenated into one string.
+ ETag string `json:"etag"`
+
+ Key string `json:"name"` // Name of the object
+ LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+ Size int64 `json:"size"` // Size in bytes of the object.
+ ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
+
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata" xml:"-"`
+
+ // x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, keeping
+ // only the first value of each header.
+ UserMetadata StringMap `json:"userMetadata"`
+
+ // x-amz-tagging values in their k/v values.
+ UserTags map[string]string `json:"userTags"`
+
+ // x-amz-tagging-count value
+ UserTagCount int
+
+ // Owner name.
+ Owner Owner
+
+ // ACL grant.
+ Grant []Grant
+
+ // The class of storage used to store the object.
+ StorageClass string `json:"storageClass"`
+
+ // Versioning related information
+ IsLatest bool
+ IsDeleteMarker bool
+ VersionID string `xml:"VersionId"`
+
+ // x-amz-replication-status value is either in one of the following states
+ // - COMPLETE
+ // - PENDING
+ // - FAILED
+ // - REPLICA (on the destination)
+ ReplicationStatus string `xml:"ReplicationStatus"`
+
+ // Lifecycle expiry-date and ruleID associated with the expiry
+ // not to be confused with `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+
+ Restore *RestoreInfo
+
+ // Error
+ Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+
+ // Size in bytes of the object.
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
new file mode 100644
index 00000000..dd781cae
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -0,0 +1,295 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>AccessDenied</Code>
+ <Message>Access Denied</Message>
+ <BucketName>bucketName</BucketName>
+ <Key>objectName</Key>
+ <RequestId>F19772218238A85A</RequestId>
+ <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go/v7"
+// ...
+// ...
+// reader, stat, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// Error - Returns S3 error string.
+func (e ErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
+ return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Empty http response. " + reportIssue
+ return errInvalidArgument(msg)
+ }
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Server: resp.Header.Get("Server"),
+ }
+
+ errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
+ // Xml decoding failed with no body, fall back to HTTP headers.
+ if err != nil { + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, + } + } else { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, + } + } + case http.StatusForbidden: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, + } + case http.StatusConflict: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + } + case http.StatusPreconditionFailed: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "PreconditionFailed", + Message: s3ErrorResponseMap["PreconditionFailed"], + BucketName: bucketName, + Key: objectName, + } + default: + msg := resp.Status + if len(errBody) > 0 { + msg = string(errBody) + if len(msg) > 1024 { + msg = msg[:1024] + "..." + } + } + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: msg, + BucketName: bucketName, + } + } + } + + // Save hostID, requestID and region information + // from headers if not available through error XML. + if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func errTransferAccelerationBucket(bucketName string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + BucketName: bucketName, + } +} + +// errEntityTooLarge - Input size is larger than supported maximum. +func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errEntityTooSmall - Input size is smaller than supported minimum. +func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errUnexpectedEOF - Unexpected end of file reached. 
+func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errInvalidBucketName - Invalid bucket name response. +func errInvalidBucketName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// errInvalidObjectName - Invalid object name response. +func errInvalidObjectName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// errAPINotSupported - API not supported response +// The specified API call is not supported +func errAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go new file mode 100644 index 00000000..9041d99e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -0,0 +1,152 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "net/http" + "net/url" +) + +// Grantee represents the person being granted permissions. +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` +} + +// Grant holds grant information +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee + Permission string `xml:"Permission"` +} + +// AccessControlList contains the set of grantees and the permissions assigned to each grantee. 
+type AccessControlList struct { + XMLName xml.Name `xml:"AccessControlList"` + Grant []Grant + Permission string `xml:"Permission"` +} + +type accessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner + AccessControlList AccessControlList +} + +// GetObjectACL get object ACLs +func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + objInfo.Owner.DisplayName = res.Owner.DisplayName + objInfo.Owner.ID = res.Owner.ID + + objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) + + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go new file mode 100644 index 00000000..2332dbf1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -0,0 +1,127 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FGetObject - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. + st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. + if st.IsDir() { + return errInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0o700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filePath + objectStat.ETag + ".part.minio" + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) + if err != nil { + return err + } + + // If we return early with an error, be sure to close and delete + // filePart. If we have an error along the way there is a chance + // that filePart is somehow damaged, and we should discard it. + closeAndRemove := true + defer func() { + if closeAndRemove { + _ = filePart.Close() + _ = os.Remove(filePartPath) + } + }() + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Initialize get object request headers to set the + // appropriate range offsets to read from. + if st.Size() > 0 { + opts.SetRange(st.Size(), 0) + } + + // Seek to current position for incoming reader. + objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + closeAndRemove = false + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. 
+ return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go new file mode 100644 index 00000000..2ce4b260 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -0,0 +1,685 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// GetObject wrapper function that accepts a request context +func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + // Detect if snowball is server location we are talking to. + var snowball bool + if location, ok := c.bucketLocCache.Get(bucketName); ok { + if location == "snowball" { + snowball = true + } + } + + var ( + err error + httpReader io.ReadCloser + objectInfo ObjectInfo + totalRead int + ) + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Used to verify if etag of object has changed since last read. + var etag string + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. 
+ size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(size) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: size, + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + objectInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + totalRead = 0 + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. 
+ size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(totalRead) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + + // Reply back how much was read. + resCh <- getResponse{ + Size: size, + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(reqCh, resCh, doneCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + doneCh chan<- struct{} + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. 
+ if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, errInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + }) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is error which was saved in previous operation. + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return 0, o.prevErr + } + + // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. + o.currOffset = offset + + // Can only compare offsets to size when size has been set. + if o.objectInfoSet { + // If offset is negative than we return io.EOF. 
+ // If offset is greater than or equal to object size we return io.EOF. + if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { + return 0, io.EOF + } + } + + // Create the new readAt request. + readAtReq := getRequest{ + isReadOp: true, + isReadAt: true, + DidOffsetChange: true, // Offset always changes. + beenRead: o.beenRead, // Set if this is the first request to try and read. + Offset: offset, // Set the offset. + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readAtReq.isFirstReq = true + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readAtReq) + if err != nil && err != io.EOF { + // Save the error. + o.prevErr = err + return response.Size, err + } + // Bytes read. + bytesRead := int64(response.Size) + // There is no valid objectInfo yet + // to compare against for EOF. + if !o.objectInfoSet { + // Update the currentOffset. + o.currOffset += bytesRead + } else { + // If this was not the first request update + // the offsets and compare against objectInfo + // for EOF. + oerr := o.setOffset(bytesRead) + if oerr != nil { + o.prevErr = oerr + return response.Size, oerr + } + } + return response.Size, err +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. +func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != nil && o.prevErr != io.EOF { + return 0, o.prevErr + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + + // Switch through whence. + switch whence { + default: + return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") + } + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. 
+ if o.objectInfo.Size+offset < 0 { + return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + + // Ask lower level to fetch again from source + o.seekData = true + + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return errInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + // Validate input arguments. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, nil, err + } + + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + if opts.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber)) + } + + // Execute GET on objectName. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + return nil, ObjectInfo{}, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) + if err != nil { + closeResponse(resp) + return nil, ObjectInfo{}, nil, err + } + + // do not close body here, caller will close + return resp.Body, objectStat, resp.Header, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go new file mode 100644 index 00000000..184ef9f8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -0,0 +1,141 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. 
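That completes the two download paths vendored here: FGetObject (api-get-object-file.go) downloads through an ETag-derived *.part.minio temp file and resumes an interrupted transfer with a ranged GET, while GetObject returns a lazy *Object whose Read/ReadAt/Seek/Stat calls are serviced by the request/response goroutine above. A sketch of both, reusing the placeholder `client` from the earlier ACL example (object names are illustrative; `io` and `os` are from the standard library):

    // Resumable download to a local file; re-running after an interruption
    // picks up from the existing part file.
    if err := client.FGetObject(context.Background(), "mybucket", "backup.tar.gz",
        "/tmp/backup.tar.gz", minio.GetObjectOptions{}); err != nil {
        log.Fatal(err)
    }

    // Streaming, seekable access without downloading the whole object first.
    obj, err := client.GetObject(context.Background(), "mybucket", "big.bin", minio.GetObjectOptions{})
    if err != nil {
        log.Fatal(err)
    }
    defer obj.Close()

    info, err := obj.Stat() // Served by a StatObject call; no object data fetched yet.
    if err != nil {
        log.Fatal(err)
    }

    // Seek halfway in; the goroutine reopens the HTTP stream at the new
    // offset on the next Read.
    if _, err := obj.Seek(info.Size/2, io.SeekStart); err != nil {
        log.Fatal(err)
    }
    if _, err := io.Copy(os.Stdout, obj); err != nil {
        log.Fatal(err)
    }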
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
+type AdvancedGetOptions struct {
+	ReplicationDeleteMarker bool
+	ReplicationProxyRequest string
+}
+
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+	headers              map[string]string
+	ServerSideEncryption encrypt.ServerSide
+	VersionID            string
+	PartNumber           int
+	// Not intended to be used by external applications
+	Internal AdvancedGetOptions
+}
+
+// StatObjectOptions are used to specify additional headers or options
+// during GET info/stat requests.
+type StatObjectOptions = GetObjectOptions
+
+// Header returns the http.Header representation of the GET options.
+func (o GetObjectOptions) Header() http.Header {
+	headers := make(http.Header, len(o.headers))
+	for k, v := range o.headers {
+		headers.Set(k, v)
+	}
+	if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
+		o.ServerSideEncryption.Marshal(headers)
+	}
+	// This header is set for the active-active replication scenario where a GET/HEAD
+	// request to site A is proxied to site B if the object/version is missing on site A.
+	if o.Internal.ReplicationProxyRequest != "" {
+		headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest)
+	}
+	return headers
+}
+
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *GetObjectOptions) Set(key, value string) {
+	if o.headers == nil {
+		o.headers = make(map[string]string)
+	}
+	o.headers[http.CanonicalHeaderKey(key)] = value
+}
+
+// SetMatchETag - set match etag.
+func (o *GetObjectOptions) SetMatchETag(etag string) error {
+	if etag == "" {
+		return errInvalidArgument("ETag cannot be empty.")
+	}
+	o.Set("If-Match", "\""+etag+"\"")
+	return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
+	if etag == "" {
+		return errInvalidArgument("ETag cannot be empty.")
+	}
+	o.Set("If-None-Match", "\""+etag+"\"")
+	return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return errInvalidArgument("Modified since cannot be empty.")
+	}
+	o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetModified - set modified time since.
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return errInvalidArgument("Modified since cannot be empty.")
+	}
+	o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (o *GetObjectOptions) SetRange(start, end int64) error { + switch { + case start == 0 && end < 0: + // Read last '-end' bytes. `bytes=-N`. + o.Set("Range", fmt.Sprintf("bytes=%d", end)) + case 0 < start && end == 0: + // Read everything starting from offset + // 'start'. `bytes=N-`. + o.Set("Range", fmt.Sprintf("bytes=%d-", start)) + case 0 <= start && start <= end: + // Read everything starting at 'start' till the + // 'end'. `bytes=N-M` + o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + default: + // All other cases such as + // bytes=-3- + // bytes=5-3 + // bytes=-2-4 + // bytes=-3-0 + // bytes=-3--2 + // are invalid. + return errInvalidArgument( + fmt.Sprintf( + "Invalid range specified: start=%d end=%d", + start, end)) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go new file mode 100644 index 00000000..9b2b00ae --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -0,0 +1,972 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } +// +func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +// Bucket List Operations. +func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Return object owner information by default + fetchOwner := true + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Validate incoming object prefix. 
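The SetRange cases at the top of this hunk map directly onto the RFC 7233 byte-range forms. A short illustration (values are made up; each call overwrites the previously set Range header):

    opts := minio.GetObjectOptions{}

    _ = opts.SetRange(0, -256) // "bytes=-256"   -> the last 256 bytes.
    _ = opts.SetRange(1024, 0) // "bytes=1024-"  -> everything from offset 1024.
    _ = opts.SetRange(0, 1023) // "bytes=0-1023" -> the first 1 KiB.

    if err := opts.SetRange(5, 3); err != nil {
        log.Println(err) // start > end falls through to the default case and is rejected.
    }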
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, + fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + object.ETag = trimEtag(object.ETag) + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?continuation-token - Used to continue iterating over a set of objects +// ?metadata - Specifies if we want metadata for the objects as part of list operation. +// ?delimiter - A delimiter is a character you use to group keys. +// ?start-after - Sets a marker to start listing lexically at this key onwards. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + if metadata { + urlValues.Set("metadata", "true") + } + + // Set this conditionally if asked + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + + // Always set encoding-type in ListObjects V2 + urlValues.Set("encoding-type", "url") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. 
+ urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, ErrorResponse{ + Code: "NotImplemented", + Message: "Truncated response should have continuation token set", + } + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + // Success. + return listBucketResult, nil +} + +func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + + marker := opts.StartAfter + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. 
+ case <-ctx.Done(): + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. + resultCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Initiate list objects goroutine here. + go func(resultCh chan<- ObjectInfo) { + defer close(resultCh) + + var ( + keyMarker = "" + versionIDMarker = "" + ) + + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) + if err != nil { + resultCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, version := range result.Versions { + info := ObjectInfo{ + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified, + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + IsDeleteMarker: version.isDeleteMarker, + } + select { + // Send object version info. + case resultCh <- info: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case resultCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next key marker is present, save it for next request. + if result.NextKeyMarker != "" { + keyMarker = result.NextKeyMarker + } + + // If next version id marker is present, save it for next request. + if result.NextVersionIDMarker != "" { + versionIDMarker = result.NextVersionIDMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(resultCh) + return resultCh +} + +// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects +// and their versions in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?key-marker - Specifies the key to start with when listing objects in a bucket. +// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. 
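listObjectVersions above is what the public ListObjects call (defined later in this file) dispatches to when WithVersions is set. A hedged sketch of listing every version under a prefix, reusing the placeholder `client`; note that errors arrive in-band on the channel via the Err field, so every consumer should check it:

    for v := range client.ListObjects(context.Background(), "mybucket",
        minio.ListObjectsOptions{Prefix: "reports/", Recursive: true, WithVersions: true}) {
        if v.Err != nil {
            log.Fatal(v.Err)
        }
        fmt.Printf("%s version=%s latest=%v deleteMarker=%v\n",
            v.Key, v.VersionID, v.IsLatest, v.IsDeleteMarker)
    }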
+func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListVersionsResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + return ListVersionsResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set versions to trigger versioning API + urlValues.Set("versions", "") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Set version ID marker + if versionIDMarker != "" { + urlValues.Set("version-id-marker", versionIDMarker) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListVersionsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode ListVersionsResult XML. + listObjectVersionsOutput := ListVersionsResult{} + err = xmlDecoder(resp.Body, &listObjectVersionsOutput) + if err != nil { + return ListVersionsResult{}, err + } + + for i, obj := range listObjectVersionsOutput.Versions { + listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + for i, obj := range listObjectVersionsOutput.CommonPrefixes { + listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + if listObjectVersionsOutput.NextKeyMarker != "" { + listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + return listObjectVersionsOutput, nil +} + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { + // Validate bucket name. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if objectMarker != "" { + urlValues.Set("marker", objectMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. + listBucketResult := ListBucketResult{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + if listBucketResult.NextMarker != "" { + listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + return listBucketResult, nil +} + +// ListObjectsOptions holds all options of a list object request +type ListObjectsOptions struct { + // Include objects versions in the listing + WithVersions bool + // Include objects metadata in the listing + WithMetadata bool + // Only list objects with the prefix + Prefix string + // Ignore '/' delimiter + Recursive bool + // The maximum number of objects requested per + // batch, advanced use-case not useful for most + // applications + MaxKeys int + // StartAfter start listing lexically at this + // object onwards, this value can also be set + // for Marker when `UseV1` is set to true. + StartAfter string + + // Use the deprecated list objects V1 API + UseV1 bool + + headers http.Header +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *ListObjectsOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(http.Header) + } + o.headers.Set(key, value) +} + +// ListObjects returns objects list after evaluating the passed options. +// +// api := client.New(....) 
+// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
+//     fmt.Println(object)
+// }
+//
+func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+	if opts.WithVersions {
+		return c.listObjectVersions(ctx, bucketName, opts)
+	}
+
+	// Use legacy list objects v1 API
+	if opts.UseV1 {
+		return c.listObjects(ctx, bucketName, opts)
+	}
+
+	// Check whether this is a snowball region; if so, ListObjectsV2 doesn't work, fall back to listObjects (v1).
+	if location, ok := c.bucketLocCache.Get(bucketName); ok {
+		if location == "snowball" {
+			return c.listObjects(ctx, bucketName, opts)
+		}
+	}
+
+	return c.listObjectsV2(ctx, bucketName, opts)
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it lists all subdirectories and all of their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive.
+// If recursive is set to 'true', this function returns all
+// the multipart objects in the given bucket name.
+//
+// api := client.New(....)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+//     fmt.Println(message)
+// }
+func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	// Allocate channel for multipart uploads.
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	// Delimiter is set to "/" by default.
+	delimiter := "/"
+	if recursive {
+		// If recursive do not delimit.
+		delimiter = ""
+	}
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+		defer close(objectMultipartStatCh)
+		// Object and upload ID markers for future requests.
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			// List all multipart uploads.
+			result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
+			if err != nil {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: err,
+				}
+				return
+			}
+			objectMarker = result.NextKeyMarker
+			uploadIDMarker = result.NextUploadIDMarker
+
+			// Send all multipart uploads.
+			for _, obj := range result.Uploads {
+				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+				select {
+				// Send individual uploads here.
+				case objectMultipartStatCh <- obj:
+				// If the context is canceled.
+				case <-ctx.Done():
+					return
+				}
+			}
+			// Send all common prefixes if any.
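ListIncompleteUploads, defined at the start of this hunk, streams ObjectMultipartInfo values over a channel in the same fashion (the listIncompleteUploads body resumes just below with the common-prefix handling). A sketch for spotting abandoned multipart uploads, with the usual placeholder names:

    for upload := range client.ListIncompleteUploads(context.Background(), "mybucket", "videos/", true) {
        if upload.Err != nil {
            log.Fatal(upload.Err)
        }
        fmt.Printf("incomplete upload: key=%s uploadID=%s\n", upload.Key, upload.UploadID)
    }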
+ // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send delimited prefixes here. + case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: + // If context is canceled. + case <-ctx.Done(): + return + } + } + // Listing ends if result not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectMultipartStatCh) + // return. + return objectMultipartStatCh +} + +// listMultipartUploadsQuery - (List Multipart Uploads). +// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request parameters. :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin. +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + // Set upload id marker. + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // maxUploads should be 1000 or less. + if maxUploads > 0 { + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + } + + // Execute GET on bucketName to list multipart uploads. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. 
+ listMultipartUploadsResult := ListMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + for i, obj := range listMultipartUploadsResult.Uploads { + listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + for i, obj := range listMultipartUploadsResult.CommonPrefixes { + listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { + // Part number marker for the next batch of request. + var nextPartNumberMarker int + partsInfo = make(map[int]ObjectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. + listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + // Trim off the odd double quotes from ETag in the beginning and end. + part.ETag = trimEtag(part.ETag) + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. + nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. +func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { + var uploadIDs []string + // Make list incomplete uploads recursive. + isRecursive := true + // List all incomplete uploads. + for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { + if mpUpload.Err != nil { + return nil, mpUpload.Err + } + if objectName == mpUpload.Key { + uploadIDs = append(uploadIDs, mpUpload.UploadID) + } + } + // Return the latest upload id. + return uploadIDs, nil +} + +// listObjectPartsQuery (List Parts query) +// - lists some or all (up to 1000) parts that have been uploaded +// for a specific multipart upload +// +// You can use the request parameters as selection criteria to return +// a subset of the uploads in a bucket, request parameters :- +// --------- +// ?part-number-marker - Specifies the part after which listing should +// begin. +// ?max-parts - Maximum parts to be listed per request. 
+func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // maxParts should be 1000 or less. + if maxParts > 0 { + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + } + + // Execute GET on objectName to get list of parts. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListObjectPartsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode list object parts XML. + listObjectPartsResult := ListObjectPartsResult{} + err = xmlDecoder(resp.Body, &listObjectPartsResult) + if err != nil { + return listObjectPartsResult, err + } + return listObjectPartsResult, nil +} + +// Decode an S3 object name according to the encoding type +func decodeS3Name(name, encodingType string) (string, error) { + switch encodingType { + case "url": + return url.QueryUnescape(name) + default: + return name, nil + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go new file mode 100644 index 00000000..0c027d55 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -0,0 +1,176 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectLegalHold - object legal hold specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html +type objectLegalHold struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"LegalHold"` + Status LegalHoldStatus `xml:"Status,omitempty"` +} + +// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call +type PutObjectLegalHoldOptions struct { + VersionID string + Status *LegalHoldStatus +} + +// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call +type GetObjectLegalHoldOptions struct { + VersionID string +} + +// LegalHoldStatus - object legal hold status. 
+type LegalHoldStatus string + +const ( + // LegalHoldEnabled indicates legal hold is enabled + LegalHoldEnabled LegalHoldStatus = "ON" + + // LegalHoldDisabled indicates legal hold is disabled + LegalHoldDisabled LegalHoldStatus = "OFF" +) + +func (r LegalHoldStatus) String() string { + return string(r) +} + +// IsValid - check whether this legal hold status is valid or not. +func (r LegalHoldStatus) IsValid() bool { + return r == LegalHoldEnabled || r == LegalHoldDisabled +} + +func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { + if status == nil { + return nil, fmt.Errorf("Status not set") + } + if !status.IsValid() { + return nil, fmt.Errorf("invalid legal hold status `%v`", status) + } + legalHold := &objectLegalHold{ + Status: *status, + } + return legalHold, nil +} + +// PutObjectLegalHold : sets object legal hold for a given object and versionID. +func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + lh, err := newObjectLegalHold(opts.Status) + if err != nil { + return err + } + + lhData, err := xml.Marshal(lh) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(lhData), + contentLength: int64(len(lhData)), + contentMD5Base64: sumMD5Base64(lhData), + contentSHA256Hex: sum256Hex(lhData), + } + + // Execute PUT Object Legal Hold. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectLegalHold gets legal-hold status of given object. +func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on bucket to list objects. 
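// A minimal usage sketch for the legal-hold API defined in this file,
// assuming a configured *minio.Client and a bucket that was created with
// object locking enabled; names are placeholders.
package example

import (
    "context"
    "fmt"

    "github.com/minio/minio-go/v7"
)

func enableLegalHold(ctx context.Context, client *minio.Client, bucket, object string) error {
    status := minio.LegalHoldEnabled
    err := client.PutObjectLegalHold(ctx, bucket, object,
        minio.PutObjectLegalHoldOptions{Status: &status})
    if err != nil {
        return err
    }
    current, err := client.GetObjectLegalHold(ctx, bucket, object,
        minio.GetObjectLegalHoldOptions{})
    if err != nil {
        return err
    }
    fmt.Println("legal hold:", *current) // expected: "legal hold: ON"
    return nil
}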
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + lh := &objectLegalHold{} + if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { + return nil, err + } + + return &lh.Status, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go new file mode 100644 index 00000000..f0a43985 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go @@ -0,0 +1,241 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RetentionMode - object retention mode. +type RetentionMode string + +const ( + // Governance - governance mode. + Governance RetentionMode = "GOVERNANCE" + + // Compliance - compliance mode. + Compliance RetentionMode = "COMPLIANCE" +) + +func (r RetentionMode) String() string { + return string(r) +} + +// IsValid - check whether this retention mode is valid or not. +func (r RetentionMode) IsValid() bool { + return r == Governance || r == Compliance +} + +// ValidityUnit - retention validity unit. +type ValidityUnit string + +const ( + // Days - denotes no. of days. + Days ValidityUnit = "DAYS" + + // Years - denotes no. of years. + Years ValidityUnit = "YEARS" +) + +func (unit ValidityUnit) String() string { + return string(unit) +} + +// IsValid - check whether this validity unit is valid or not. +func (unit ValidityUnit) isValid() bool { + return unit == Days || unit == Years +} + +// Retention - bucket level retention configuration. +type Retention struct { + Mode RetentionMode + Validity time.Duration +} + +func (r Retention) String() string { + return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) +} + +// IsEmpty - returns whether retention is empty or not. 
+func (r Retention) IsEmpty() bool { + return r.Mode == "" || r.Validity == 0 +} + +// objectLockConfig - object lock configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectLockConfig struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"ObjectLockConfiguration"` + ObjectLockEnabled string `xml:"ObjectLockEnabled"` + Rule *struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + } `xml:"Rule,omitempty"` +} + +func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { + config := &objectLockConfig{ + ObjectLockEnabled: "Enabled", + } + + if mode != nil && validity != nil && unit != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + + if !unit.isValid() { + return nil, fmt.Errorf("invalid validity unit `%v`", unit) + } + + config.Rule = &struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + }{} + + config.Rule.DefaultRetention.Mode = *mode + if *unit == Days { + config.Rule.DefaultRetention.Days = validity + } else { + config.Rule.DefaultRetention.Years = validity + } + + return config, nil + } + + if mode == nil && validity == nil && unit == nil { + return config, nil + } + + return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") +} + +// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + config, err := newObjectLockConfig(mode, validity, unit) + if err != nil { + return err + } + + configData, err := xml.Marshal(config) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(configData), + contentLength: int64(len(configData)), + contentMD5Base64: sumMD5Base64(configData), + contentSHA256Hex: sum256Hex(configData), + } + + // Execute PUT bucket object lock configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// GetObjectLockConfig gets object lock configuration of given bucket. +func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", nil, nil, nil, err + } + + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + // Execute GET on bucket to list objects. 
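// Usage sketch for the bucket-level object-lock API in this file, assuming a
// configured *minio.Client and a bucket created with object locking enabled.
// Mode, validity and unit must be passed together (or all omitted), as
// enforced by newObjectLockConfig above.
package example

import (
    "context"
    "fmt"

    "github.com/minio/minio-go/v7"
)

func configureBucketLock(ctx context.Context, client *minio.Client, bucket string) error {
    mode := minio.Governance
    validity := uint(30)
    unit := minio.Days
    if err := client.SetBucketObjectLockConfig(ctx, bucket, &mode, &validity, &unit); err != nil {
        return err
    }
    enabled, m, v, u, err := client.GetObjectLockConfig(ctx, bucket)
    if err != nil {
        return err
    }
    fmt.Println(enabled, *m, *v, *u) // expected: Enabled GOVERNANCE 30 DAYS
    return nil
}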
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return "", nil, nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + config := &objectLockConfig{} + if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { + return "", nil, nil, nil, err + } + + if config.Rule != nil { + mode = &config.Rule.DefaultRetention.Mode + if config.Rule.DefaultRetention.Days != nil { + validity = config.Rule.DefaultRetention.Days + days := Days + unit = &days + } else { + validity = config.Rule.DefaultRetention.Years + years := Years + unit = &years + } + return config.ObjectLockEnabled, mode, validity, unit, nil + } + return config.ObjectLockEnabled, nil, nil, nil, nil +} + +// GetBucketObjectLockConfig gets object lock configuration of given bucket. +func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) + return mode, validity, unit, err +} + +// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go new file mode 100644 index 00000000..b29cb1f8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -0,0 +1,165 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectRetention - object retention specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectRetention struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"Retention"` + Mode RetentionMode `xml:"Mode,omitempty"` + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` +} + +func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { + objectRetention := &objectRetention{} + + if date != nil && !date.IsZero() { + objectRetention.RetainUntilDate = date + } + if mode != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + objectRetention.Mode = *mode + } + + return objectRetention, nil +} + +// PutObjectRetentionOptions represents options specified by user for PutObject call +type PutObjectRetentionOptions struct { + GovernanceBypass bool + Mode *RetentionMode + RetainUntilDate *time.Time + VersionID string +} + +// PutObjectRetention sets object retention for a given object and versionID. +func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("retention", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) + if err != nil { + return err + } + + retentionData, err := xml.Marshal(retention) + if err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(retentionData), + contentLength: int64(len(retentionData)), + contentMD5Base64: sumMD5Base64(retentionData), + contentSHA256Hex: sum256Hex(retentionData), + customHeader: headers, + } + + // Execute PUT Object Retention. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectRetention gets retention of given object. +func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, nil, err + } + urlValues := make(url.Values) + urlValues.Set("retention", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + // Execute GET on bucket to list objects. 
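// Usage sketch for the per-object retention API defined in this file,
// assuming a configured *minio.Client and an object-lock enabled bucket; the
// 24-hour window is an arbitrary example. GovernanceBypass is only honored
// for callers holding the s3:BypassGovernanceRetention permission.
package example

import (
    "context"
    "fmt"
    "time"

    "github.com/minio/minio-go/v7"
)

func lockObjectForADay(ctx context.Context, client *minio.Client, bucket, object string) error {
    mode := minio.Governance
    until := time.Now().UTC().Add(24 * time.Hour)
    err := client.PutObjectRetention(ctx, bucket, object, minio.PutObjectRetentionOptions{
        GovernanceBypass: true, // allow overriding an existing GOVERNANCE lock
        Mode:             &mode,
        RetainUntilDate:  &until,
    })
    if err != nil {
        return err
    }
    m, t, err := client.GetObjectRetention(ctx, bucket, object, "") // "" == current version
    if err != nil {
        return err
    }
    fmt.Println(*m, t) // GOVERNANCE plus the retain-until timestamp
    return nil
}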
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + retention := &objectRetention{} + if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { + return nil, nil, err + } + + return &retention.Mode, retention.RetainUntilDate, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go new file mode 100644 index 00000000..305c36de --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go @@ -0,0 +1,157 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// PutObjectTaggingOptions holds an object version id +// to update tag(s) of a specific object version +type PutObjectTaggingOptions struct { + VersionID string +} + +// PutObjectTagging replaces or creates object tag(s) and can target +// a specific object version in a versioned bucket. +func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + reqBytes, err := xml.Marshal(otags) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(reqBytes), + contentLength: int64(len(reqBytes)), + contentMD5Base64: sumMD5Base64(reqBytes), + } + + // Execute PUT to set a object tagging. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectTaggingOptions holds the object version ID +// to fetch the tagging key/value pairs +type GetObjectTaggingOptions struct { + VersionID string +} + +// GetObjectTagging fetches object tag(s) with options to target +// a specific object version in a versioned bucket. 
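// Usage sketch for the tagging API in this file: build a tag set with the
// companion pkg/tags package (the `true` argument marks it as an object tag
// set rather than a bucket tag set), then round-trip it. Assumes a
// configured *minio.Client; tag keys and values are placeholders.
package example

import (
    "context"
    "fmt"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/tags"
)

func tagObject(ctx context.Context, client *minio.Client, bucket, object string) error {
    objectTags, err := tags.NewTags(map[string]string{"team": "billing", "tier": "hot"}, true)
    if err != nil {
        return err
    }
    if err := client.PutObjectTagging(ctx, bucket, object, objectTags, minio.PutObjectTaggingOptions{}); err != nil {
        return err
    }
    got, err := client.GetObjectTagging(ctx, bucket, object, minio.GetObjectTaggingOptions{})
    if err != nil {
        return err
    }
    fmt.Println(got.ToMap()) // map[team:billing tier:hot]
    return nil
}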
+func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on object to get object tag(s) + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return tags.ParseObjectXML(resp.Body) +} + +// RemoveObjectTaggingOptions holds the version id of the object to remove +type RemoveObjectTaggingOptions struct { + VersionID string +} + +// RemoveObjectTagging removes object tag(s) with options to control a specific object +// version in a versioned bucket +func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute DELETE on object to remove object tag(s) + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // S3 returns "204 No content" after Object tag deletion. + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go new file mode 100644 index 00000000..2e4bacf1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -0,0 +1,228 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// presignURL - Returns a presigned URL for an input 'method'. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c *Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { + // Input validation. 
+ if method == "" { + return nil, errInvalidArgument("method cannot be empty.") + } + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err = isValidExpiry(expires); err != nil { + return nil, err + } + + // Convert expires into seconds. + expireSeconds := int64(expires / time.Second) + reqMetadata := requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + queryValues: reqParams, + extraPresignHeader: extraHeaders, + } + + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + var req *http.Request + if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { + return nil, err + } + return req.URL, nil +} + +// PresignedGetObject - Returns a presigned URL to access an object +// data without credentials. URL can have a maximum expiry of +// upto 7days or a minimum of 1sec. Additionally you can override +// a set of response headers using the query parameters. +func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedHeadObject - Returns a presigned URL to access +// object metadata without credentials. URL can have a maximum expiry +// of upto 7days or a minimum of 1sec. Additionally you can override +// a set of response headers using the query parameters. +func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedPutObject - Returns a presigned URL to upload an object +// without credentials. URL can have a maximum expiry of upto 7days +// or a minimum of 1sec. +func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil) +} + +// PresignHeader - similar to Presign() but allows including HTTP headers that +// will be used to build the signature. The request using the resulting URL will +// need to have the exact same headers to be added for signature validation to +// pass. +// +// FIXME: The extra header parameter should be included in Presign() in the next +// major version bump, and this function should then be deprecated. +func (c *Client) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders) +} + +// Presign - returns a presigned URL for any http method of your choice along +// with custom request params and extra signed headers. URL can have a maximum +// expiry of upto 7days or a minimum of 1sec. 
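// Usage sketch for the presign API above: a time-limited anonymous download
// URL with an overridden Content-Disposition response header. Assumes a
// configured *minio.Client; the filename is a placeholder.
package example

import (
    "context"
    "net/url"
    "time"

    "github.com/minio/minio-go/v7"
)

func shareDownloadLink(ctx context.Context, client *minio.Client, bucket, object string) (string, error) {
    // Response-header overrides travel as query parameters and are signed
    // into the URL.
    params := make(url.Values)
    params.Set("response-content-disposition", `attachment; filename="report.csv"`)

    u, err := client.PresignedGetObject(ctx, bucket, object, 15*time.Minute, params)
    if err != nil {
        return "", err
    }
    return u.String(), nil // valid for 15 minutes without credentials
}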
+func (c *Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedPostPolicy - Returns POST urlString, form data to upload an object. +func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { + // Validate input arguments. + if p.expiration.IsZero() { + return nil, nil, errors.New("Expiration time must be specified") + } + if _, ok := p.formData["key"]; !ok { + return nil, nil, errors.New("object key must be specified") + } + if _, ok := p.formData["bucket"]; !ok { + return nil, nil, errors.New("bucket name must be specified") + } + + bucketName := p.formData["bucket"] + // Fetch the bucket location. + location, err := c.getBucketLocation(ctx, bucketName) + if err != nil { + return nil, nil, err + } + + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) + + u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) + if err != nil { + return nil, nil, err + } + + // Get credentials from the configured credentials provider. + credValues, err := c.credsProvider.Get() + if err != nil { + return nil, nil, err + } + + var ( + signerType = credValues.SignerType + sessionToken = credValues.SessionToken + accessKeyID = credValues.AccessKeyID + secretAccessKey = credValues.SecretAccessKey + ) + + if signerType.IsAnonymous() { + return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") + } + + // Keep time. + t := time.Now().UTC() + // For signature version '2' handle here. + if signerType.IsV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + // For Google endpoint set this value to be 'GoogleAccessId'. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + p.formData["GoogleAccessId"] = accessKeyID + } else { + // For all other endpoints set this value to be 'AWSAccessKeyId'. + p.formData["AWSAccessKeyId"] = accessKeyID + } + // Sign the policy. + p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) + return u, p.formData, nil + } + + // Add date policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-date", + value: t.Format(iso8601DateFormat), + }); err != nil { + return nil, nil, err + } + + // Add algorithm policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-algorithm", + value: signV4Algorithm, + }); err != nil { + return nil, nil, err + } + + // Add a credential policy. + credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-credential", + value: credential, + }); err != nil { + return nil, nil, err + } + + if sessionToken != "" { + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-security-token", + value: sessionToken, + }); err != nil { + return nil, nil, err + } + } + + // Get base64 encoded policy. + policyBase64 := p.base64() + + // Fill in the form data. 
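// Usage sketch for PresignedPostPolicy, assuming the exported PostPolicy
// builder (minio.NewPostPolicy with SetBucket/SetKey/SetExpires); names and
// the 10-minute expiry are placeholders. The returned form fields are posted
// as multipart/form-data together with the file content.
package example

import (
    "context"
    "time"

    "github.com/minio/minio-go/v7"
)

func browserUploadForm(ctx context.Context, client *minio.Client, bucket, key string) (string, map[string]string, error) {
    policy := minio.NewPostPolicy()
    if err := policy.SetBucket(bucket); err != nil {
        return "", nil, err
    }
    if err := policy.SetKey(key); err != nil {
        return "", nil, err
    }
    if err := policy.SetExpires(time.Now().UTC().Add(10 * time.Minute)); err != nil {
        return "", nil, err
    }
    u, formData, err := client.PresignedPostPolicy(ctx, policy)
    if err != nil {
        return "", nil, err
    }
    // POST formData (plus a trailing "file" field) to u to upload.
    return u.String(), formData, nil
}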
+ p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + if sessionToken != "" { + p.formData["x-amz-security-token"] = sessionToken + } + p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) + return u, p.formData, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go new file mode 100644 index 00000000..1a6db3e1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -0,0 +1,123 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Bucket operations +func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + // Validate the input arguments. + if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { + return err + } + + err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking) + if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { + if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { + err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking) + } + } + return err +} + +func (c *Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { + defer func() { + // Save the location into cache on a successful makeBucket response. + if err == nil { + c.bucketLocCache.Set(bucketName, location) + } + }() + + // If location is empty, treat is a default region 'us-east-1'. + if location == "" { + location = "us-east-1" + // For custom region clients, default + // to custom region instead not 'us-east-1'. + if c.region != "" { + location = c.region + } + } + // PUT bucket request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + bucketLocation: location, + } + + if objectLockEnabled { + headers := make(http.Header) + headers.Add("x-amz-bucket-object-lock-enabled", "true") + reqMetadata.customHeader = headers + } + + // If location is not 'us-east-1' create bucket location config. 
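// Usage sketch for the bucket-creation API this file implements, assuming a
// configured *minio.Client. "BucketAlreadyOwnedByYou" is the S3 error code
// commonly returned on re-creation, so it is treated as success here.
package example

import (
    "context"

    "github.com/minio/minio-go/v7"
)

func ensureBucket(ctx context.Context, client *minio.Client, bucket string) error {
    err := client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{
        Region:        "us-east-1",
        ObjectLocking: true, // sends x-amz-bucket-object-lock-enabled: true
    })
    if err != nil && minio.ToErrorResponse(err).Code == "BucketAlreadyOwnedByYou" {
        return nil
    }
    return err
}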
+ if location != "us-east-1" && location != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return err + } + reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) + reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Success. + return nil +} + +// MakeBucketOptions holds all options to tweak bucket creation +type MakeBucketOptions struct { + // Bucket location + Region string + // Enable object locking + ObjectLocking bool +} + +// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. +// +// Location is an optional argument, by default all buckets are +// created in US Standard Region. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + return c.makeBucket(ctx, bucketName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go new file mode 100644 index 00000000..149a536e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -0,0 +1,150 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "math" + "os" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +const nullVersionID = "null" + +// Verify if reader is *minio.Object +func isObject(reader io.Reader) (ok bool) { + _, ok = reader.(*Object) + return +} + +// Verify if reader is a generic ReaderAt +func isReadAt(reader io.Reader) (ok bool) { + var v *os.File + v, ok = reader.(*os.File) + if ok { + // Stdin, Stdout and Stderr all have *os.File type + // which happen to also be io.ReaderAt compatible + // we need to add special conditions for them to + // be ignored by this function. + for _, f := range []string{ + "/dev/stdin", + "/dev/stdout", + "/dev/stderr", + } { + if f == v.Name() { + ok = false + break + } + } + } else { + _, ok = reader.(io.ReaderAt) + } + return +} + +// OptimalPartInfo - calculate the optimal part info for a given +// object size. 
+// +// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible +// object storage it will have the following parameters as constants. +// +// maxPartsCount - 10000 +// minPartSize - 16MiB +// maxMultipartPutObjectSize - 5TiB +// +func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { + // object size is '-1' set it to 5TiB. + var unknownSize bool + if objectSize == -1 { + unknownSize = true + objectSize = maxMultipartPutObjectSize + } + + // object size is larger than supported maximum. + if objectSize > maxMultipartPutObjectSize { + err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") + return + } + + var partSizeFlt float64 + if configuredPartSize > 0 { + if int64(configuredPartSize) > objectSize { + err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") + return + } + + if !unknownSize { + if objectSize > (int64(configuredPartSize) * maxPartsCount) { + err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") + return + } + } + + if configuredPartSize < absMinPartSize { + err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") + return + } + + if configuredPartSize > maxPartSize { + err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") + return + } + + partSizeFlt = float64(configuredPartSize) + if unknownSize { + // If input has unknown size and part size is configured + // keep it to maximum allowed as per 10000 parts. + objectSize = int64(configuredPartSize) * maxPartsCount + } + } else { + configuredPartSize = minPartSize + // Use floats for part size for all calculations to avoid + // overflows during float64 to int64 conversions. + partSizeFlt = float64(objectSize / maxPartsCount) + partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) + } + + // Total parts count. + totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) + // Part size. + partSize = int64(partSizeFlt) + // Last part size. + lastPartSize = objectSize - int64(totalPartsCount-1)*partSize + return totalPartsCount, partSize, lastPartSize, nil +} + +// getUploadID - fetch upload id if already present for an object name +// or initiate a new request to fetch a new upload id. +func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return "", err + } + + // Initiate multipart upload for an object. + initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) + if err != nil { + return "", err + } + return initMultipartUploadResult.UploadID, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go new file mode 100644 index 00000000..4d29dfc1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go @@ -0,0 +1,64 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return UploadInfo{}, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return UploadInfo{}, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go new file mode 100644 index 00000000..342a8dc2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -0,0 +1,398 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { + info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. 
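// A worked example of the exported part-sizing function above plus the
// file-upload entry point, assuming a configured *minio.Client; the object
// size and file path are placeholders. With the constants documented above
// (16MiB minimum part size, 10000 parts maximum), a 10GiB object with no
// configured part size splits into 640 parts of 16MiB, and since
// 640*16MiB == 10GiB exactly, the last part is also 16MiB.
package example

import (
    "context"
    "fmt"

    "github.com/minio/minio-go/v7"
)

func uploadLargeFile(ctx context.Context, client *minio.Client, bucket, object, path string) error {
    parts, partSize, lastPartSize, err := minio.OptimalPartInfo(10<<30, 0)
    if err != nil {
        return err
    }
    fmt.Println(parts, partSize, lastPartSize) // 640 16777216 16777216

    // FPutObject stats the file itself and infers the content type from the
    // file extension when PutObjectOptions does not set one.
    _, err = client.FPutObject(ctx, bucket, object, path, minio.PutObjectOptions{})
    return err
}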
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + } + return info, err +} + +func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + for partNumber <= totalPartsCount { + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. + hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5) + + length, rErr := readFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { + break + } + + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return UploadInfo{}, rErr + } + + // Calculates hash sums while copying partSize bytes into cw. + for k, v := range hashAlgos { + v.Write(buf[:length]) + hashSums[k] = v.Sum(nil) + v.Close() + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Checksums.. + var ( + md5Base64 string + sha256Hex string + ) + if hashSums["md5"] != nil { + md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) + } + if hashSums["sha256"] != nil { + sha256Hex = hex.EncodeToString(hashSums["sha256"]) + } + + // Proceed to upload the part. + objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. 
+ for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } + } + urlValues.Set("versionId", opts.Internal.SourceVersionID) + } + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide, +) (ObjectPart, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectPart{}, err + } + if size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. 
+ urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // Set encryption headers, if any. + customHeader := make(http.Header) + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. + if sse != nil && sse.Type() == encrypt.SSEC { + sse.Marshal(customHeader) + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + } + + // Execute PUT on each part. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := ObjectPart{} + objPart.Size = size + objPart.PartNumber = partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = trimEtag(resp.Header.Get("ETag")) + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, + complete completeMultipartUpload, opts PutObjectOptions, +) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return UploadInfo{}, err + } + + // Instantiate all the complete multipart buffer. + completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + customHeader: opts.Header(), + } + + // Execute POST to complete multipart upload for an objectName. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Read resp.Body into a []bytes to parse for Error response inside the body + var b []byte + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + return UploadInfo{}, err + } + // Decode completed multipart upload response on success. 
+ completeMultipartUploadResult := completeMultipartUploadResult{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return UploadInfo{}, err + } else if completeMultipartUploadResult.Bucket == "" { + // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. + // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values + // of the members. + + // Decode completed multipart upload response on failure + completeMultipartUploadErr := ErrorResponse{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return UploadInfo{}, err + } + return UploadInfo{}, completeMultipartUploadErr + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: completeMultipartUploadResult.Bucket, + Key: completeMultipartUploadResult.Key, + ETag: trimEtag(completeMultipartUploadResult.ETag), + VersionID: resp.Header.Get(amzVersionID), + Location: completeMultipartUploadResult.Location, + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go new file mode 100644 index 00000000..2497aecf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -0,0 +1,491 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// putObjectMultipartStream - upload a large object using +// multipart upload and streaming signature for signing payload. +// Comprehensive put object operation involving multipart uploads. +// +// Following code handles these types of readers. +// +// - *minio.Object +// - Any reader which has a method 'ReadAt()' +// +func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { + if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { + // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. 
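// The decode-twice pattern above guards against S3 returning HTTP 200 with
// an error document in the body. A standalone sketch of the same idea with
// plain encoding/xml and hypothetical minimal types: because neither struct
// pins its root element name, decoding an <Error> body into the success type
// reports no error but leaves every field zero-valued.
package main

import (
    "bytes"
    "encoding/xml"
    "fmt"
)

type completeResult struct {
    Bucket string `xml:"Bucket"`
    Key    string `xml:"Key"`
}

type s3Error struct {
    Code    string `xml:"Code"`
    Message string `xml:"Message"`
}

func main() {
    body := []byte(`<Error><Code>InternalError</Code><Message>retry</Message></Error>`)

    var ok completeResult
    if err := xml.NewDecoder(bytes.NewReader(body)).Decode(&ok); err == nil && ok.Bucket == "" {
        // The "success" decode silently matched nothing: re-decode as an error.
        var e s3Error
        if err := xml.NewDecoder(bytes.NewReader(body)).Decode(&e); err == nil {
            fmt.Println("masked failure:", e.Code, e.Message) // masked failure: InternalError retry
        }
    }
}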
+ info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+ } else {
+ info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+ }
+ }
+ return info, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part ObjectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part to upload.
+ Part ObjectPart // Part details, filled in after a successful upload.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 128MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows resuming multipart uploads by
+// reading at an offset, avoiding a re-read of data which was already
+// uploaded. Internally this function stages each part in a per-worker
+// in-memory buffer before uploading it to the server.
+func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+ reader io.ReaderAt, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Abort the multipart upload in progress if the function returns
+ // any error. Since we do not resume, we should purge the parts
+ // which have been uploaded to relinquish storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to the server; should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Declare a channel that sends the next part number to be uploaded.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadPartsCh := make(chan uploadPartReq, 10000)
+
+ // Declare a channel that sends back the response of a part upload.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Used for readability, lastPartNumber is always totalPartsCount.
+	lastPartNumber := totalPartsCount
+
+	// Send each part number to the channel to be processed.
+	for p := 1; p <= totalPartsCount; p++ {
+		uploadPartsCh <- uploadPartReq{PartNum: p}
+	}
+	close(uploadPartsCh)
+
+	partsBuf := make([][]byte, opts.getNumThreads())
+	for i := range partsBuf {
+		partsBuf[i] = make([]byte, 0, partSize)
+	}
+
+	// Receive each part number from the channel, allowing
+	// opts.getNumThreads() parallel uploads.
+	for w := 1; w <= opts.getNumThreads(); w++ {
+		go func(w int, partSize int64) {
+			// Each worker will draw from the part channel and upload in parallel.
+			for uploadReq := range uploadPartsCh {
+
+				// Calculate the part offset as a multiple of partSize.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (size - lastPartSize)
+					partSize = lastPartSize
+				}
+
+				n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
+				if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+					uploadedPartsCh <- uploadedPartRes{
+						Error: rerr,
+					}
+					// Exit the goroutine.
+					return
+				}
+
+				// Wrap the staged part in a progress hook reader.
+				hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress)
+
+				// Proceed to upload the part.
+				objPart, err := c.uploadPart(ctx, bucketName, objectName,
+					uploadID, hookReader, uploadReq.PartNum,
+					"", "", partSize, opts.ServerSideEncryption)
+				if err != nil {
+					uploadedPartsCh <- uploadedPartRes{
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
+				}
+
+				// Save successfully uploaded part metadata.
+				uploadReq.Part = objPart
+
+				// Send successful part info through the channel.
+				uploadedPartsCh <- uploadedPartRes{
+					Size:    objPart.Size,
+					PartNum: uploadReq.PartNum,
+					Part:    uploadReq.Part,
+				}
+			}
+		}(w, partSize)
+	}
+
+	// Gather the responses as they occur and update any
+	// progress bar.
+	for u := 1; u <= totalPartsCount; u++ {
+		uploadRes := <-uploadedPartsCh
+		if uploadRes.Error != nil {
+			return UploadInfo{}, uploadRes.Error
+		}
+		// Update the totalUploadedSize.
+		totalUploadedSize += uploadRes.Size
+		// Store the parts to be completed in order.
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       uploadRes.Part.ETag,
+			PartNumber: uploadRes.Part.PartNumber,
+		})
+	}
+
+	// Verify if we uploaded all the data.
+	if totalUploadedSize != size {
+		return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
+
+func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	// Initiates a new multipart request
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Abort the multipart upload if the function returns
+	// any error; since we do not resume, we should purge
+	// the parts which have been uploaded to relinquish
+	// storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Total data read and written to server; should be equal to 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	buf := make([]byte, partSize)
+
+	// Avoid declaring variables in the for loop.
+	var md5Base64 string
+	var hookReader io.Reader
+
+	// Part number always starts with '1'.
+	var partNumber int
+	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+
+		// Proceed to upload the part.
+		if partNumber == totalPartsCount {
+			partSize = lastPartSize
+		}
+
+		if opts.SendContentMd5 {
+			length, rerr := readFull(reader, buf)
+			if rerr == io.EOF && partNumber > 1 {
+				break
+			}
+
+			if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+				return UploadInfo{}, rerr
+			}
+
+			// Calculate md5sum.
+			hash := c.md5Hasher()
+			hash.Write(buf[:length])
+			md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+			hash.Close()
+
+			// Update progress reader appropriately to the latest offset
+			// as we read from the source.
+			hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress)
+		} else {
+			// Update progress reader appropriately to the latest offset
+			// as we read from the source.
+			hookReader = newHook(reader, opts.Progress)
+		}
+
+		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID,
+			io.LimitReader(hookReader, partSize),
+			partNumber, md5Base64, "", partSize, opts.ServerSideEncryption)
+		if uerr != nil {
+			return UploadInfo{}, uerr
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += partSize
+	}
+
+	// Verify if we uploaded all the data.
+	if size > 0 {
+		if totalUploadedSize != size {
+			return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+		}
+	}
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
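+	// (CompleteMultipartUpload requires the parts in ascending
+	// part-number order.)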
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
+
+// putObject is a special function used for Google Cloud Storage, since
+// Google's multipart API is not S3 compatible.
+func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Size -1 is only supported on Google Cloud Storage; we error
+	// out in all other situations.
+	if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
+		return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
+	}
+
+	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
+		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
+	}
+
+	if size > 0 {
+		if isReadAt(reader) && !isObject(reader) {
+			seeker, ok := reader.(io.Seeker)
+			if ok {
+				offset, err := seeker.Seek(0, io.SeekCurrent)
+				if err != nil {
+					return UploadInfo{}, errInvalidArgument(err.Error())
+				}
+				reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
+			}
+		}
+	}
+
+	var md5Base64 string
+	if opts.SendContentMd5 {
+		// Create a buffer.
+		buf := make([]byte, size)
+
+		length, rErr := readFull(reader, buf)
+		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
+			return UploadInfo{}, rErr
+		}
+
+		// Calculate md5sum.
+		hash := c.md5Hasher()
+		hash.Write(buf[:length])
+		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+		reader = bytes.NewReader(buf[:length])
+		hash.Close()
+	}
+
+	// Update progress reader appropriately to the latest offset as we
+	// read from the source.
+	readSeeker := newHook(reader, opts.Progress)
+
+	// This function does not calculate sha256 and md5sum for payload.
+	// Execute put object.
+	return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts)
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+	// Set headers.
+	customHeader := opts.Header()
+
+	// Populate request metadata.
+ reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + } + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } + urlValues := make(url.Values) + urlValues.Set("versionId", opts.Internal.SourceVersionID) + reqMetadata.queryValues = urlValues + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(resp.Header.Get("ETag")), + VersionID: resp.Header.Get(amzVersionID), + Size: size, + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go new file mode 100644 index 00000000..0dc77e6c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -0,0 +1,391 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "sort" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/net/http/httpguts" +) + +// ReplicationStatus represents replication status of object +type ReplicationStatus string + +const ( + // ReplicationStatusPending indicates replication is pending + ReplicationStatusPending ReplicationStatus = "PENDING" + // ReplicationStatusComplete indicates replication completed ok + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + // ReplicationStatusFailed indicates replication failed + ReplicationStatusFailed ReplicationStatus = "FAILED" + // ReplicationStatusReplica indicates object is a replica of a source + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Empty returns true if no replication status set. 
+func (r ReplicationStatus) Empty() bool { + return r == "" +} + +// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition +// implementation on MinIO server +type AdvancedPutOptions struct { + SourceVersionID string + SourceETag string + ReplicationStatus ReplicationStatus + SourceMTime time.Time + ReplicationRequest bool + RetentionTimestamp time.Time + TaggingTimestamp time.Time + LegalholdTimestamp time.Time +} + +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + UserTags map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + Mode RetentionMode + RetainUntilDate time.Time + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string + WebsiteRedirectLocation string + PartSize uint64 + LegalHold LegalHoldStatus + SendContentMd5 bool + DisableMultipart bool + Internal AdvancedPutOptions +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers + } + return +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + contentType := opts.ContentType + if contentType == "" { + contentType = "application/octet-stream" + } + header.Set("Content-Type", contentType) + + if opts.ContentEncoding != "" { + header.Set("Content-Encoding", opts.ContentEncoding) + } + if opts.ContentDisposition != "" { + header.Set("Content-Disposition", opts.ContentDisposition) + } + if opts.ContentLanguage != "" { + header.Set("Content-Language", opts.ContentLanguage) + } + if opts.CacheControl != "" { + header.Set("Cache-Control", opts.CacheControl) + } + + if opts.Mode != "" { + header.Set(amzLockMode, opts.Mode.String()) + } + + if !opts.RetainUntilDate.IsZero() { + header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) + } + + if opts.LegalHold != "" { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) + } + + if opts.StorageClass != "" { + header.Set(amzStorageClass, opts.StorageClass) + } + + if opts.WebsiteRedirectLocation != "" { + header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) + } + + if !opts.Internal.ReplicationStatus.Empty() { + header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) + } + if !opts.Internal.SourceMTime.IsZero() { + header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano)) + } + if opts.Internal.SourceETag != "" { + header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) + } + if opts.Internal.ReplicationRequest { + header.Set(minIOBucketReplicationRequest, "") + } + if !opts.Internal.LegalholdTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.RetentionTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.TaggingTimestamp.IsZero() { + 
+		header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+	}
+
+	if len(opts.UserTags) != 0 {
+		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
+	}
+
+	for k, v := range opts.UserMetadata {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			header.Set(k, v)
+		} else {
+			header.Set("x-amz-meta-"+k, v)
+		}
+	}
+	return
+}
+
+// validate() checks if the UserMetadata map has standard headers and raises an error if so.
+func (opts PutObjectOptions) validate() (err error) {
+	for k, v := range opts.UserMetadata {
+		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
+			return errInvalidArgument(k + " unsupported user defined metadata name")
+		}
+		if !httpguts.ValidHeaderFieldValue(v) {
+			return errInvalidArgument(v + " unsupported user defined metadata value")
+		}
+	}
+	if opts.Mode != "" && !opts.Mode.IsValid() {
+		return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
+	}
+	if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
+		return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
+	}
+	return nil
+}
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []CompletePart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+//  - For size smaller than 16MiB PutObject automatically does a
+//    single atomic PUT operation.
+//
+//  - For size larger than 16MiB PutObject automatically does a
+//    multipart upload operation.
+//
+//  - For size input as -1 PutObject does a multipart Put operation
+//    until input stream reaches EOF. Maximum object size that can
+//    be uploaded through this operation will be 5TiB.
+//
+// WARNING: Passing '-1' buffers data in memory, and those buffers
+// cannot be reused. For best results with PutObject(), always pass
+// the size.
+//
+// NOTE: Upon errors during upload, the multipart operation is entirely aborted.
+func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+	opts PutObjectOptions,
+) (info UploadInfo, err error) {
+	if objectSize < 0 && opts.DisableMultipart {
+		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
+	}
+
+	err = opts.validate()
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
+
+func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Check for largest object size allowed.
+	if size > int64(maxMultipartPutObjectSize) {
+		return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+	}
+
+	// NOTE: Streaming signature is not supported by GCS.
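+	// Google Cloud Storage uploads therefore always take the single-PUT
+	// putObject path below.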
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + + if size < 0 { + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + for partNumber <= totalPartsCount { + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { + return UploadInfo{}, rerr + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. + hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Proceed to upload the part. + objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + md5Base64, "", int64(length), opts.ServerSideEncryption) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rerr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. 
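+	// A missing map entry would mean a part was never uploaded, which
+	// should be impossible here; treat it as an invalid argument.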
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
new file mode 100644
index 00000000..b7502e2d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -0,0 +1,215 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/s2"
+)
+
+// SnowballOptions contains options for PutObjectsSnowball calls.
+type SnowballOptions struct {
+	// Opts is options applied to all objects.
+	Opts PutObjectOptions
+
+	// Processing options:
+
+	// InMemory specifies that all objects should be collected in memory
+	// before they are uploaded.
+	// If false, a temporary file will be created.
+	InMemory bool
+
+	// Compress enables content compression before upload.
+	// Compression will typically reduce memory and network usage.
+	// Compression can safely be enabled with MinIO hosts.
+	Compress bool
+}
+
+// SnowballObject contains information about a single object to be added to the snowball.
+type SnowballObject struct {
+	// Key is the destination key, including prefix.
+	Key string
+
+	// Size is the content size of this object.
+	Size int64
+
+	// ModTime to apply to the object.
+	ModTime time.Time
+
+	// Content of the object.
+	// Exactly 'Size' number of bytes must be provided.
+	Content io.Reader
+
+	// Close will be called when an object has finished processing.
+	// Note that if PutObjectsSnowball returns because of an error,
+	// objects not consumed from the input will NOT have been closed.
+	// Leave as nil for no callback.
+	Close func()
+}
+
+type nopReadSeekCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopReadSeekCloser) Close() error {
+	return nil
+}
+
+// This is available as io.ReadSeekCloser from go1.16
+type readSeekCloser interface {
+	io.Reader
+	io.Closer
+	io.Seeker
+}
+
+// PutObjectsSnowball will put multiple objects with a single put call.
+// A (compressed) TAR file will be created which will contain multiple objects.
+// The key for each object will be used for the destination in the specified bucket.
+// Total size should be < 5TB.
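+// Objects are staged into one (optionally compressed) TAR archive that
+// is uploaded with the snowball auto-extract header set, so the server
+// unpacks it into the individual objects.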
+// This function blocks until 'objs' is closed and the content has been uploaded. +func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { + err = opts.Opts.validate() + if err != nil { + return err + } + var tmpWriter io.Writer + var getTmpReader func() (rc readSeekCloser, sz int64, err error) + if opts.InMemory { + b := bytes.NewBuffer(nil) + tmpWriter = b + getTmpReader = func() (readSeekCloser, int64, error) { + return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil + } + } else { + f, err := ioutil.TempFile("", "s3-putsnowballobjects-*") + if err != nil { + return err + } + name := f.Name() + tmpWriter = f + var once sync.Once + defer once.Do(func() { + f.Close() + }) + defer os.Remove(name) + getTmpReader = func() (readSeekCloser, int64, error) { + once.Do(func() { + f.Close() + }) + f, err := os.Open(name) + if err != nil { + return nil, 0, err + } + st, err := f.Stat() + if err != nil { + return nil, 0, err + } + return f, st.Size(), nil + } + } + flush := func() error { return nil } + if !opts.Compress { + if !opts.InMemory { + // Insert buffer for writes. + buf := bufio.NewWriterSize(tmpWriter, 1<<20) + flush = buf.Flush + tmpWriter = buf + } + } else { + s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) + flush = s2c.Close + defer s2c.Close() + tmpWriter = s2c + } + t := tar.NewWriter(tmpWriter) + +objectLoop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objs: + if !ok { + break objectLoop + } + + closeObj := func() {} + if obj.Close != nil { + closeObj = obj.Close + } + + // Trim accidental slash prefix. + obj.Key = strings.TrimPrefix(obj.Key, "/") + header := tar.Header{ + Typeflag: tar.TypeReg, + Name: obj.Key, + Size: obj.Size, + ModTime: obj.ModTime, + Format: tar.FormatPAX, + } + if err := t.WriteHeader(&header); err != nil { + closeObj() + return err + } + n, err := io.Copy(t, obj.Content) + if err != nil { + closeObj() + return err + } + if n != obj.Size { + closeObj() + return io.ErrUnexpectedEOF + } + closeObj() + } + } + // Flush tar + err = t.Flush() + if err != nil { + return err + } + // Flush compression + err = flush() + if err != nil { + return err + } + if opts.Opts.UserMetadata == nil { + opts.Opts.UserMetadata = map[string]string{} + } + opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + opts.Opts.DisableMultipart = true + rc, sz, err := getTmpReader() + if err != nil { + return err + } + defer rc.Close() + rand := c.random.Uint64() + _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go new file mode 100644 index 00000000..0fee9022 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -0,0 +1,544 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+//revive:disable
+
+// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
+type BucketOptions = RemoveBucketOptions
+
+//revive:enable
+
+// RemoveBucketOptions specifies special headers to purge buckets; only
+// useful when the endpoint is MinIO
+type RemoveBucketOptions struct {
+	ForceDelete bool
+}
+
+// RemoveBucketWithOptions deletes the bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket will be deleted forcibly if bucket options set
+// ForceDelete to 'true'.
+func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+	if opts.ForceDelete {
+		headers.Set(minIOForceDelete, "true")
+	}
+
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		contentSHA256Hex: emptySHA256Hex,
+		customHeader:     headers,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+	return nil
+}
+
+// RemoveBucket deletes the bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
+func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+
+	return nil
+}
+
+// AdvancedRemoveOptions intended for internal use by replication
+type AdvancedRemoveOptions struct {
+	ReplicationDeleteMarker bool
+	ReplicationStatus       ReplicationStatus
+	ReplicationMTime        time.Time
+	ReplicationRequest      bool
+}
+
+// RemoveObjectOptions represents options specified by user for RemoveObject call
+type RemoveObjectOptions struct {
+	ForceDelete      bool
+	GovernanceBypass bool
+	VersionID        string
+	Internal         AdvancedRemoveOptions
+}
+
+// RemoveObject removes an object from a bucket.
+func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	res := c.removeObject(ctx, bucketName, objectName, opts)
+	return res.Err
+}
+
+func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+
+	if opts.GovernanceBypass {
+		// Set the bypass governance retention header
+		headers.Set(amzBypassGovernance, "true")
+	}
+	if opts.Internal.ReplicationDeleteMarker {
+		headers.Set(minIOBucketReplicationDeleteMarker, "true")
+	}
+	if !opts.Internal.ReplicationMTime.IsZero() {
+		headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.ReplicationStatus.Empty() {
+		headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
+	}
+	if opts.Internal.ReplicationRequest {
+		headers.Set(minIOBucketReplicationRequest, "")
+	}
+	if opts.ForceDelete {
+		headers.Set(minIOForceDelete, "true")
+	}
+	// Execute DELETE on objectName.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		contentSHA256Hex: emptySHA256Hex,
+		queryValues:      urlValues,
+		customHeader:     headers,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return RemoveObjectResult{Err: err}
+	}
+	if resp != nil {
+		// If some unexpected error happened and max retry is reached, we want to let the client know.
+		if resp.StatusCode != http.StatusNoContent {
+			err := httpRespToErrorResponse(resp, bucketName, objectName)
+			return RemoveObjectResult{Err: err}
+		}
+	}
+
+	// DeleteObject always responds with http '204' even for
+	// objects which do not exist. So no need to handle them
+	// specifically.
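+	// The delete-marker headers below are only populated by the server
+	// on versioned buckets.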
+	return RemoveObjectResult{
+		ObjectName:            objectName,
+		ObjectVersionID:       opts.VersionID,
+		DeleteMarker:          resp.Header.Get("x-amz-delete-marker") == "true",
+		DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"),
+	}
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	VersionID  string
+	Err        error
+}
+
+// RemoveObjectResult - container of Multi Delete S3 API result
+type RemoveObjectResult struct {
+	ObjectName      string
+	ObjectVersionID string
+
+	DeleteMarker          bool
+	DeleteMarkerVersionID string
+
+	Err error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the multi-object delete request
+func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
+	delObjects := []deleteObject{}
+	for _, obj := range objects {
+		delObjects = append(delObjects, deleteObject{
+			Key:       obj.Key,
+			VersionID: obj.VersionID,
+		})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false})
+	return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the multi-object delete response
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, resultCh chan<- RemoveObjectResult) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		resultCh <- RemoveObjectResult{ObjectName: "", Err: err}
+		return
+	}
+
+	// Fill deletions that returned success
+	for _, obj := range rmResult.DeletedObjects {
+		resultCh <- RemoveObjectResult{
+			ObjectName: obj.Key,
+			// Only filled with versioned buckets
+			ObjectVersionID:       obj.VersionID,
+			DeleteMarker:          obj.DeleteMarker,
+			DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
+		}
+	}
+
+	// Fill deletions that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		// 'Version does not exist' is not an error; ignore and continue.
+		switch obj.Code {
+		case "InvalidArgument", "NoSuchVersion":
+			continue
+		}
+		resultCh <- RemoveObjectResult{
+			ObjectName:      obj.Key,
+			ObjectVersionID: obj.VersionID,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjectsOptions represents options specified by user for RemoveObjects call
+type RemoveObjectsOptions struct {
+	GovernanceBypass bool
+}
+
+// RemoveObjects removes multiple objects from a bucket. Object versions
+// to remove can be specified on the ObjectInfo values received from
+// objectsCh. Remove failures are sent back via the error channel.
+func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate if bucket name is valid.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate objects channel to be properly allocated.
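+	// A nil channel would make the remover goroutine block forever.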
+ if objectsCh == nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return errorCh + } + + resultCh := make(chan RemoveObjectResult, 1) + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + go func() { + defer close(errorCh) + for res := range resultCh { + // Send only errors to the error channel + if res.Err == nil { + continue + } + errorCh <- RemoveObjectError{ + ObjectName: res.ObjectName, + VersionID: res.ObjectVersionID, + Err: res.Err, + } + } + }() + + return errorCh +} + +// RemoveObjectsWithResult removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove results, successes and failures are sent back via +// RemoveObjectResult channel +func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { + resultCh := make(chan RemoveObjectResult, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: err, + } + return resultCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return resultCh + } + + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + return resultCh +} + +// Return true if the character is within the allowed characters in an XML 1.0 document +// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets +func validXMLChar(r rune) (ok bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +func hasInvalidXMLChar(str string) bool { + for _, s := range str { + if !validXMLChar(s) { + return true + } + } + return false +} + +// Generate and call MultiDelete S3 requests based on entries received from objectsCh +func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { + maxEntries := 1000 + finish := false + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Close result channel when Multi delete finishes. + defer close(resultCh) + + // Loop over entries by 1000 and call MultiDelete requests + for { + if finish { + break + } + count := 0 + var batch []ObjectInfo + + // Try to gather 1000 entries + for object := range objectsCh { + if hasInvalidXMLChar(object.Key) { + // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + VersionID: object.VersionID, + GovernanceBypass: opts.GovernanceBypass, + }) + if err := removeResult.Err; err != nil { + // Version does not exist is not an error ignore and continue. 
+				switch ToErrorResponse(err).Code {
+				case "InvalidArgument", "NoSuchVersion":
+					continue
+				}
+				// Report the failure and move on; without this continue the
+				// result would be sent twice below.
+				resultCh <- removeResult
+				continue
+			}
+
+			resultCh <- removeResult
+			continue
+		}
+
+		batch = append(batch, object)
+		if count++; count >= maxEntries {
+			break
+		}
+	}
+		if count == 0 {
+			// Multi Objects Delete API doesn't accept an empty object list; quit immediately.
+			break
+		}
+		if count < maxEntries {
+			// We didn't have 1000 entries, so this is the last batch.
+			finish = true
+		}
+
+		// Build headers.
+		headers := make(http.Header)
+		if opts.GovernanceBypass {
+			// Set the bypass governance retention header
+			headers.Set(amzBypassGovernance, "true")
+		}
+
+		// Generate remove multi objects XML request
+		removeBytes := generateRemoveMultiObjectsRequest(batch)
+		// Execute POST on bucket to delete objects.
+		resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+			bucketName:       bucketName,
+			queryValues:      urlValues,
+			contentBody:      bytes.NewReader(removeBytes),
+			contentLength:    int64(len(removeBytes)),
+			contentMD5Base64: sumMD5Base64(removeBytes),
+			contentSHA256Hex: sum256Hex(removeBytes),
+			customHeader:     headers,
+		})
+		if resp != nil {
+			if resp.StatusCode != http.StatusOK {
+				e := httpRespToErrorResponse(resp, bucketName, "")
+				resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
+			}
+		}
+		if err != nil {
+			for _, b := range batch {
+				resultCh <- RemoveObjectResult{
+					ObjectName:      b.Key,
+					ObjectVersionID: b.VersionID,
+					Err:             err,
+				}
+			}
+			continue
+		}
+
+		// Process multiobjects remove xml response
+		processRemoveMultiObjectsResponse(resp.Body, batch, resultCh)
+
+		closeResponse(resp)
+	}
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+	// Find multipart upload ids of the object to be aborted.
+	uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
+	if err != nil {
+		return err
+	}
+
+	for _, uploadID := range uploadIDs {
+		// abort incomplete multipart upload, based on the upload id passed.
+		err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Initialize url queries.
+	urlValues := make(url.Values)
+	urlValues.Set("uploadId", uploadID)
+
+	// Execute DELETE on multipart upload.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			// Abort has no response body, handle it for any errors.
+			var errorResponse ErrorResponse
+			switch resp.StatusCode {
+			case http.StatusNotFound:
+				// This is needed specifically for abort and it cannot
+				// be converged into the default case.
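+				// A 404 means the upload is already gone; synthesize a
+				// NoSuchUpload error response from the reply headers.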
+ errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go new file mode 100644 index 00000000..9ec8f4f2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-restore.go @@ -0,0 +1,182 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// RestoreType represents the restore request type +type RestoreType string + +const ( + // RestoreSelect represents the restore SELECT operation + RestoreSelect = RestoreType("SELECT") +) + +// TierType represents a retrieval tier +type TierType string + +const ( + // TierStandard is the standard retrieval tier + TierStandard = TierType("Standard") + // TierBulk is the bulk retrieval tier + TierBulk = TierType("Bulk") + // TierExpedited is the expedited retrieval tier + TierExpedited = TierType("Expedited") +) + +// GlacierJobParameters represents the retrieval tier parameter +type GlacierJobParameters struct { + Tier TierType +} + +// Encryption contains the type of server-side encryption used during object retrieval +type Encryption struct { + EncryptionType string + KMSContext string + KMSKeyID string `xml:"KMSKeyId"` +} + +// MetadataEntry represents a metadata information of the restored object. 
+type MetadataEntry struct { + Name string + Value string +} + +// S3 holds properties of the copy of the archived object +type S3 struct { + AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` + BucketName string + Prefix string + CannedACL *string `xml:"CannedACL,omitempty"` + Encryption *Encryption `xml:"Encryption,omitempty"` + StorageClass *string `xml:"StorageClass,omitempty"` + Tagging *tags.Tags `xml:"Tagging,omitempty"` + UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` +} + +// SelectParameters holds the select request parameters +type SelectParameters struct { + XMLName xml.Name `xml:"SelectParameters"` + ExpressionType QueryExpressionType + Expression string + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization +} + +// OutputLocation holds properties of the copy of the archived object +type OutputLocation struct { + XMLName xml.Name `xml:"OutputLocation"` + S3 S3 `xml:"S3"` +} + +// RestoreRequest holds properties of the restore object request +type RestoreRequest struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` + Type *RestoreType `xml:"Type,omitempty"` + Tier *TierType `xml:"Tier,omitempty"` + Days *int `xml:"Days,omitempty"` + GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` + Description *string `xml:"Description,omitempty"` + SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` + OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` +} + +// SetDays sets the days parameter of the restore request +func (r *RestoreRequest) SetDays(v int) { + r.Days = &v +} + +// SetGlacierJobParameters sets the GlacierJobParameters of the restore request +func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { + r.GlacierJobParameters = &v +} + +// SetType sets the type of the restore request +func (r *RestoreRequest) SetType(v RestoreType) { + r.Type = &v +} + +// SetTier sets the retrieval tier of the restore request +func (r *RestoreRequest) SetTier(v TierType) { + r.Tier = &v +} + +// SetDescription sets the description of the restore request +func (r *RestoreRequest) SetDescription(v string) { + r.Description = &v +} + +// SetSelectParameters sets SelectParameters of the restore select request +func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { + r.SelectParameters = &v +} + +// SetOutputLocation sets the properties of the copy of the archived object +func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { + r.OutputLocation = &v +} + +// RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API +func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + restoreRequestBytes, err := xml.Marshal(req) + if err != nil { + return err + } + + urlValues := make(url.Values) + urlValues.Set("restore", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + + // Execute POST on bucket/object. 
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentMD5Base64: sumMD5Base64(restoreRequestBytes), + contentSHA256Hex: sum256Hex(restoreRequestBytes), + contentBody: bytes.NewReader(restoreRequestBytes), + contentLength: int64(len(restoreRequestBytes)), + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go new file mode 100644 index 00000000..592d4cdc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -0,0 +1,361 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "errors" + "io" + "reflect" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// owner container for bucket owner information. +type owner struct { + DisplayName string + ID string +} + +// CommonPrefix container for prefix response. +type CommonPrefix struct { + Prefix string +} + +// ListBucketV2Result container for listObjects response version 2. +type ListBucketV2Result struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. 
+	IsTruncated bool
+	MaxKeys     int64
+	Name        string
+
+	// Holds the token that will be sent in the next request to fetch the next group of keys
+	NextContinuationToken string
+
+	ContinuationToken string
+	Prefix            string
+
+	// FetchOwner and StartAfter are currently not used
+	FetchOwner string
+	StartAfter string
+}
+
+// Version is an element in the list object versions response
+type Version struct {
+	ETag         string
+	IsLatest     bool
+	Key          string
+	LastModified time.Time
+	Owner        Owner
+	Size         int64
+	StorageClass string
+	VersionID    string `xml:"VersionId"`
+
+	isDeleteMarker bool
+}
+
+// ListVersionsResult is an element in the list object versions response
+// and has a special Unmarshaler because we need to preserve the order
+// of <Version> and <DeleteMarker> in the ListVersionsResult.Versions slice
+type ListVersionsResult struct {
+	Versions []Version
+
+	CommonPrefixes      []CommonPrefix
+	Name                string
+	Prefix              string
+	Delimiter           string
+	MaxKeys             int64
+	EncodingType        string
+	IsTruncated         bool
+	KeyMarker           string
+	VersionIDMarker     string
+	NextKeyMarker       string
+	NextVersionIDMarker string
+}
+
+// UnmarshalXML is custom unmarshal code for the response of ListObjectVersions; the custom
+// code will unmarshal <Version> and <DeleteMarker> tags and save them in the Versions field to
+// preserve the lexical order of the listing.
+func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	for {
+		// Read tokens from the XML document in a stream.
+		t, err := d.Token()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		se, ok := t.(xml.StartElement)
+		if ok {
+			tagName := se.Name.Local
+			switch tagName {
+			case "Name", "Prefix",
+				"Delimiter", "EncodingType",
+				"KeyMarker", "NextKeyMarker":
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				v := reflect.ValueOf(l).Elem().FieldByName(tagName)
+				if v.IsValid() {
+					v.SetString(s)
+				}
+			case "VersionIdMarker":
+				// VersionIdMarker is a special case because of 'Id' instead of 'ID' in the field name
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				l.VersionIDMarker = s
+			case "NextVersionIdMarker":
+				// NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in the field name
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				l.NextVersionIDMarker = s
+			case "IsTruncated": // bool
+				var b bool
+				if err = d.DecodeElement(&b, &se); err != nil {
+					return err
+				}
+				l.IsTruncated = b
+			case "MaxKeys": // int64
+				var i int64
+				if err = d.DecodeElement(&i, &se); err != nil {
+					return err
+				}
+				l.MaxKeys = i
+			case "CommonPrefixes":
+				var cp CommonPrefix
+				if err = d.DecodeElement(&cp, &se); err != nil {
+					return err
+				}
+				l.CommonPrefixes = append(l.CommonPrefixes, cp)
+			case "DeleteMarker", "Version":
+				var v Version
+				if err = d.DecodeElement(&v, &se); err != nil {
+					return err
+				}
+				if tagName == "DeleteMarker" {
+					v.isDeleteMarker = true
+				}
+				l.Versions = append(l.Versions, v)
+			default:
+				return errors.New("unrecognized option:" + tagName)
+			}
+
+		}
+	}
+	return nil
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []CommonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	Marker      string
+	MaxKeys     int64
+	Name        string
+
+	// When the response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as the marker in the subsequent request to get the next set of objects.
+	// Object storage lists objects in alphabetical order. Note: this
+	// element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include the NextMarker
+	// and it is truncated, you can use the value of the last Key in
+	// the response as the marker in the subsequent request to get the
+	// next set of object keys.
+	NextMarker string
+	Prefix     string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+	Bucket             string
+	KeyMarker          string
+	UploadIDMarker     string `xml:"UploadIdMarker"`
+	NextKeyMarker      string
+	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+	EncodingType       string
+	MaxUploads         int64
+	IsTruncated        bool
+	Uploads            []ObjectMultipartInfo `xml:"Upload"`
+	Prefix             string
+	Delimiter          string
+	// A response can contain CommonPrefixes only if you specify a delimiter.
+	CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+	ID          string
+	DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+	ETag         string
+	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
+	// Part number identifies the part.
+	PartNumber int
+
+	// Date and time the part was uploaded.
+	LastModified time.Time
+
+	// Entity tag returned when the part was uploaded, usually md5sum
+	// of the part.
+	ETag string
+
+	// Size of the uploaded part data.
+	Size int64
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+
+	Initiator initiator
+	Owner     owner
+
+	StorageClass         string
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated bool
+	ObjectParts []ObjectPart `xml:"Part"`
+
+	EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+	// Part number identifies the part.
+	PartNumber int
+	ETag       string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+	Parts   []CompletePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// deleteObject container for Delete element in MultiObjects Delete XML request +type deleteObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` +} + +// deletedObject container for Deleted element in MultiObjects Delete XML response +type deletedObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` + // These fields are ignored. + DeleteMarker bool + DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"` +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string + VersionID string `xml:"VersionId"` +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` +} diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go new file mode 100644 index 00000000..5d47d7ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -0,0 +1,757 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore = "IGNORE" + CSVFileHeaderInfoUse = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP = "GZIP" + SelectCompressionBZIP = "BZIP2" + + // Non-standard compression schemes, supported by MinIO hosts: + + SelectCompressionZSTD = "ZSTD" // Zstandard compression. + SelectCompressionLZ4 = "LZ4" // LZ4 Stream + SelectCompressionS2 = "S2" // S2 Stream + SelectCompressionSNAPPY = "SNAPPY" // Snappy stream +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. 
+const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. +const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + fileHeaderInfoSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool + + Comments string + commentsSet bool +} + +// SetFileHeaderInfo sets the file header info in the CSV input options +func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { + c.FileHeaderInfo = val + c.fileHeaderInfoSet = true +} + +// SetRecordDelimiter sets the record delimiter in the CSV input options +func (c *CSVInputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter in the CSV input options +func (c *CSVInputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV input options +func (c *CSVInputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options +func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// SetComments sets the comments character in the CSV input options +func (c *CSVInputOptions) SetComments(val string) { + c.Comments = val + c.commentsSet = true +} + +// MarshalXML - produces the xml representation of the CSV input options struct +func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { + if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + if c.Comments != "" || c.commentsSet { + if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != 
nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + quoteFieldsSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool +} + +// SetQuoteFields sets the quote field parameter in the CSV output options +func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { + c.QuoteFields = val + c.quoteFieldsSet = true +} + +// SetRecordDelimiter sets the record delimiter character in the CSV output options +func (c *CSVOutputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter character in the CSV output options +func (c *CSVOutputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV output options +func (c *CSVOutputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options +func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// MarshalXML - produces the xml representation of the CSVOutputOptions struct +func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if c.QuoteFields != "" || c.quoteFieldsSet { + if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType + typeSet bool +} + +// SetType sets the JSON type in the JSON input options +func (j *JSONInputOptions) SetType(typ JSONType) { + j.Type = typ + j.typeSet = true +} + +// MarshalXML - produces the xml representation of the JSONInputOptions struct +func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.Type != "" || j.typeSet { + if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + 
RecordDelimiter string + recordDelimiterSet bool +} + +// SetRecordDelimiter sets the record delimiter in the JSON output options +func (j *JSONOutputOptions) SetRecordDelimiter(val string) { + j.RecordDelimiter = val + j.recordDelimiterSet = true +} + +// MarshalXML - produces the xml representation of the JSONOutputOptions struct +func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.RecordDelimiter != "" || j.recordDelimiterSet { + if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON = "JSON" + SelectObjectTypeParquet = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. +type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// messageType represents the type of message. +type messageType string + +const ( + errorMsg messageType = "error" + commonMsg = "event" +) + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + recordsEvent = "Records" + progressEvent = "Progress" + statsEvent = "Stats" +) + +// contentType represents content type of event. 
+type contentType string
+
+const (
+	xmlContent contentType = "text/xml"
+)
+
+// SelectObjectContent is an implementation of the http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
+func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+
+	selectReqBytes, err := xml.Marshal(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Set("select", "")
+	urlValues.Set("select-type", "2")
+
+	// Execute POST on bucket/object.
+	resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		customHeader:     opts.Header(),
+		contentMD5Base64: sumMD5Base64(selectReqBytes),
+		contentSHA256Hex: sum256Hex(selectReqBytes),
+		contentBody:      bytes.NewReader(selectReqBytes),
+		contentLength:    int64(len(selectReqBytes)),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSelectResults(resp, bucketName)
+}
+
+// NewSelectResults creates a Select Result parser that parses the response
+// and returns a Reader that will return parsed and assembled select output.
+func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
+	if resp.StatusCode != http.StatusOK {
+		return nil, httpRespToErrorResponse(resp, bucketName, "")
+	}
+
+	pipeReader, pipeWriter := io.Pipe()
+	streamer := &SelectResults{
+		resp:       resp,
+		stats:      &StatsMessage{},
+		progress:   &ProgressMessage{},
+		pipeReader: pipeReader,
+	}
+	streamer.start(pipeWriter)
+	return streamer, nil
+}
+
+// Close - closes the underlying response body and the stream reader.
+func (s *SelectResults) Close() error {
+	defer closeResponse(s.resp)
+	return s.pipeReader.Close()
+}
+
+// Read - is a reader compatible implementation for SelectObjectContent records.
+func (s *SelectResults) Read(b []byte) (n int, err error) {
+	return s.pipeReader.Read(b)
+}
+
+// Stats - information about a request's stats when processing is complete.
+func (s *SelectResults) Stats() *StatsMessage {
+	return s.stats
+}
+
+// Progress - information about the progress of a request.
+func (s *SelectResults) Progress() *ProgressMessage {
+	return s.progress
+}
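The streaming reader above is easiest to understand from the caller's side. Here is a hypothetical usage fragment, assuming an already-constructed *minio.Client named client, placeholder bucket and object names, and implied imports of context, fmt, io, log, os and the minio package:

opts := minio.SelectObjectOptions{
	Expression:     "SELECT * FROM S3Object",
	ExpressionType: minio.QueryExpressionTypeSQL,
	InputSerialization: minio.SelectObjectInputSerialization{
		CompressionType: minio.SelectCompressionNONE,
		CSV: &minio.CSVInputOptions{
			FileHeaderInfo: minio.CSVFileHeaderInfoUse,
		},
	},
	OutputSerialization: minio.SelectObjectOutputSerialization{
		CSV: &minio.CSVOutputOptions{
			RecordDelimiter: "\n",
			FieldDelimiter:  ",",
		},
	},
}

res, err := client.SelectObjectContent(context.Background(), "mybucket", "data.csv", opts)
if err != nil {
	log.Fatal(err)
}
defer res.Close()

// SelectResults satisfies io.Reader; Records events stream through it.
if _, err := io.Copy(os.Stdout, res); err != nil {
	log.Fatal(err)
}
fmt.Printf("scanned %d bytes\n", res.Stats().BytesScanned)

Stats and Progress are only fully populated once the stream has been drained, since the server sends those events near the end of the response.

+
+// start is the main function that decodes the large byte array into
+// several events that are sent through the eventstream.
+func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
+	go func() {
+		for {
+			var prelude preludeInfo
+			headers := make(http.Header)
+			var err error
+
+			// Create CRC code
+			crc := crc32.New(crc32.IEEETable)
+			crcReader := io.TeeReader(s.resp.Body, crc)
+
+			// Extract the prelude(12 bytes) into a struct to extract relevant information.
+			prelude, err = processPrelude(crcReader, crc)
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+				closeResponse(s.resp)
+				return
+			}
+
+			// Extract the headers(variable bytes) into a struct to extract relevant information
+			if prelude.headerLen > 0 {
+				if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
+					pipeWriter.CloseWithError(err)
+					closeResponse(s.resp)
+					return
+				}
+			}
+
+			// Get the actual payload length so that the appropriate amount of
+			// bytes can be read or parsed.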
+ payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. + e := eventType(headers.Get("event-type")) + + // Handle all supported events. + switch e { + case endEvent: + pipeWriter.Close() + closeResponse(s.resp) + return + case recordsEvent: + if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + case progressEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) + closeResponse(s.resp) + return + } + case statsEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) + closeResponse(s.resp) + return + } + } + } + + // Ensures that the full message's CRC is correct and + // that the message is not corrupted + if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + } + }() +} + +// PayloadLen is a function that calculates the length of the payload. +func (p preludeInfo) PayloadLen() int64 { + return int64(p.totalLen - p.headerLen - 16) +} + +// processPrelude is the function that reads the 12 bytes of the prelude and +// ensures the CRC is correct while also extracting relevant information into +// the struct, +func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { + var err error + pInfo := preludeInfo{} + + // reads total length of the message (first 4 bytes) + pInfo.totalLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // reads total header length of the message (2nd 4 bytes) + pInfo.headerLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // checks that the CRC is correct (3rd 4 bytes) + preCRC := crc.Sum32() + if err := checkCRC(prelude, preCRC); err != nil { + return pInfo, err + } + + return pInfo, nil +} + +// extracts the relevant information from the Headers. +func extractHeader(body io.Reader, myHeaders http.Header) error { + for { + // extracts the first part of the header, + headerTypeName, err := extractHeaderType(body) + if err != nil { + // Since end of file, we have read all of our headers + if err == io.EOF { + break + } + return err + } + + // reads the 7 present in the header and ignores it. + extractUint8(body) + + headerValueName, err := extractHeaderValue(body) + if err != nil { + return err + } + + myHeaders.Set(headerTypeName, headerValueName) + + } + return nil +} + +// extractHeaderType extracts the first half of the header message, the header type. 
+func extractHeaderType(body io.Reader) (string, error) { + // extracts 2 bit integer + headerNameLen, err := extractUint8(body) + if err != nil { + return "", err + } + // extracts the string with the appropriate number of bytes + headerName, err := extractString(body, int(headerNameLen)) + if err != nil { + return "", err + } + return strings.TrimPrefix(headerName, ":"), nil +} + +// extractsHeaderValue extracts the second half of the header message, the +// header value +func extractHeaderValue(body io.Reader) (string, error) { + bodyLen, err := extractUint16(body) + if err != nil { + return "", err + } + bodyName, err := extractString(body, int(bodyLen)) + if err != nil { + return "", err + } + return bodyName, nil +} + +// extracts a string from byte array of a particular number of bytes. +func extractString(source io.Reader, lenBytes int) (string, error) { + myVal := make([]byte, lenBytes) + _, err := source.Read(myVal) + if err != nil { + return "", err + } + return string(myVal), nil +} + +// extractUint32 extracts a 4 byte integer from the byte array. +func extractUint32(r io.Reader) (uint32, error) { + buf := make([]byte, 4) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(buf), nil +} + +// extractUint16 extracts a 2 byte integer from the byte array. +func extractUint16(r io.Reader) (uint16, error) { + buf := make([]byte, 2) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(buf), nil +} + +// extractUint8 extracts a 1 byte integer from the byte array. +func extractUint8(r io.Reader) (uint8, error) { + buf := make([]byte, 1) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return buf[0], nil +} + +// checkCRC ensures that the CRC matches with the one from the reader. +func checkCRC(r io.Reader, expect uint32) error { + msgCRC, err := extractUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go new file mode 100644 index 00000000..6deb5f5d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -0,0 +1,115 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to +// control cancellations and timeouts. +func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return false, err + } + + // Execute HEAD on bucketName. 
+ resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// StatObject verifies if object exists and you have permission to access. +func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + headers := opts.Header() + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + // Execute HEAD on objectName. + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + if resp != nil { + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "MethodNotAllowed", + Message: "The specified method is not allowed against this resource.", + BucketName: bucketName, + Key: objectName, + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, errResp + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return ToObjectInfo(bucketName, objectName, resp.Header) +} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go new file mode 100644 index 00000000..ee637bd0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -0,0 +1,930 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/http/cookiejar"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	md5simd "github.com/minio/md5-simd"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/signer"
+	"golang.org/x/net/publicsuffix"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+	// Standard options.
+
+	// Parsed endpoint url provided by the user.
+	endpointURL *url.URL
+
+	// Holds various credential providers.
+	credsProvider *credentials.Credentials
+
+	// Custom signerType value overrides all credentials.
+	overrideSignerType credentials.SignatureType
+
+	// User supplied.
+	appInfo struct {
+		appName    string
+		appVersion string
+	}
+
+	// Indicate whether we are using https or not
+	secure bool
+
+	// Needs allocation.
+	httpClient     *http.Client
+	bucketLocCache *bucketLocationCache
+
+	// Advanced functionality.
+	isTraceEnabled  bool
+	traceErrorsOnly bool
+	traceOutput     io.Writer
+
+	// S3 specific accelerated endpoint.
+	s3AccelerateEndpoint string
+
+	// Region endpoint
+	region string
+
+	// Random seed.
+	random *rand.Rand
+
+	// lookup indicates type of url lookup supported by server. If not specified,
+	// default to Auto.
+	lookup BucketLookupType
+
+	// Factory for MD5 hash functions.
+	md5Hasher    func() md5simd.Hasher
+	sha256Hasher func() md5simd.Hasher
+
+	healthStatus int32
+}
+
+// Options for New method
+type Options struct {
+	Creds        *credentials.Credentials
+	Secure       bool
+	Transport    http.RoundTripper
+	Region       string
+	BucketLookup BucketLookupType
+
+	// Custom hash routines. Leave nil to use standard.
+	CustomMD5    func() md5simd.Hasher
+	CustomSHA256 func() md5simd.Hasher
+}
+
+// Global constants.
+const (
+	libraryName    = "minio-go"
+	libraryVersion = "v7.0.24"
+)
+
+// The user agent should always follow the style below.
+// Please open an issue to discuss any new changes here.
+//
+//	MinIO (OS; ARCH) LIB/VER APP/VER
+const (
+	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// BucketLookupType is type of url lookup supported by server.
+type BucketLookupType int
+
+// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
+const (
+	BucketLookupAuto BucketLookupType = iota
+	BucketLookupDNS
+	BucketLookupPath
+)
+
+// New - instantiate minio client with options
+func New(endpoint string, opts *Options) (*Client, error) {
+	if opts == nil {
+		return nil, errors.New("no options provided")
+	}
+	clnt, err := privateNew(endpoint, opts)
+	if err != nil {
+		return nil, err
+	}
+	// Google cloud storage should be set to signature V2, force it if not.
+	if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
+		clnt.overrideSignerType = credentials.SignatureV2
+	}
+	// If Amazon S3 set to signature v4.
+	if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
+		clnt.overrideSignerType = credentials.SignatureV4
+	}
+
+	return clnt, nil
+}
+
+// EndpointURL returns the URL of the S3 endpoint.
+func (c *Client) EndpointURL() *url.URL {
+	endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
+	return &endpoint
+}
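Since New is the package's main entry point, a minimal construction sketch may help; the endpoint and keys below are placeholders, and BucketLookup is shown only to note that it defaults to Auto:

package main

import (
	"log"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
		// BucketLookup: minio.BucketLookupDNS, // optional; Auto by default
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("endpoint:", client.EndpointURL())
}

+
+// lockedRandSource provides protected rand source, implements rand.Source interface.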
+type lockedRandSource struct { + lk sync.Mutex + src rand.Source +} + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *lockedRandSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +// Seed uses the provided seed value to initialize the generator to a +// deterministic state. +func (r *lockedRandSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +func privateNew(endpoint string, opts *Options) (*Client, error) { + // construct endpoint. + endpointURL, err := getEndpointURL(endpoint, opts.Secure) + if err != nil { + return nil, err + } + + // Initialize cookies to preserve server sent cookies if any and replay + // them upon each request. + jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + + // instantiate new Client. + clnt := new(Client) + + // Save the credentials. + clnt.credsProvider = opts.Creds + + // Remember whether we are using https or not + clnt.secure = opts.Secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = endpointURL + + transport := opts.Transport + if transport == nil { + transport, err = DefaultTransport(opts.Secure) + if err != nil { + return nil, err + } + } + + // Instantiate http client and bucket location cache. + clnt.httpClient = &http.Client{ + Jar: jar, + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + // Sets custom region, if region is empty bucket location cache is used automatically. + if opts.Region == "" { + opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) + } + clnt.region = opts.Region + + // Instantiate bucket location cache. + clnt.bucketLocCache = newBucketLocationCache() + + // Introduce a new locked random seed. + clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + + // Add default md5 hasher. + clnt.md5Hasher = opts.CustomMD5 + clnt.sha256Hasher = opts.CustomSHA256 + if clnt.md5Hasher == nil { + clnt.md5Hasher = newMd5Hasher + } + if clnt.sha256Hasher == nil { + clnt.sha256Hasher = newSHA256Hasher + } + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = opts.BucketLookup + + // healthcheck is not initialized + clnt.healthStatus = unknown + + // Return. + return clnt, nil +} + +// SetAppInfo - add application details to user agent. +func (c *Client) SetAppInfo(appName string, appVersion string) { + // if app name and version not set, we do not set a new user agent. + if appName != "" && appVersion != "" { + c.appInfo.appName = appName + c.appInfo.appVersion = appVersion + } +} + +// TraceOn - enable HTTP tracing. +func (c *Client) TraceOn(outputStream io.Writer) { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true +} + +// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. +func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { + c.TraceOn(outputStream) + c.traceErrorsOnly = true +} + +// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. 
+// If all tracing needs to be turned off, call TraceOff().
+func (c *Client) TraceErrorsOnlyOff() {
+	c.traceErrorsOnly = false
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+	// Disable tracing.
+	c.isTraceEnabled = false
+	c.traceErrorsOnly = false
+}
+
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is only specific to S3; for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+		c.s3AccelerateEndpoint = accelerateEndpoint
+	}
+}
+
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For signature v4 request if the connection is insecure compute only sha256.
+// - For signature v4 request if the connection is secure compute only md5.
+// - For anonymous request compute md5.
+func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
+	hashSums = make(map[string][]byte)
+	hashAlgos = make(map[string]md5simd.Hasher)
+	if c.overrideSignerType.IsV4() {
+		if c.secure {
+			hashAlgos["md5"] = c.md5Hasher()
+		} else {
+			hashAlgos["sha256"] = c.sha256Hasher()
+		}
+	} else {
+		if c.overrideSignerType.IsAnonymous() {
+			hashAlgos["md5"] = c.md5Hasher()
+		}
+	}
+	if isMd5Requested {
+		hashAlgos["md5"] = c.md5Hasher()
+	}
+	return hashAlgos, hashSums
+}
+
+const (
+	unknown = -1
+	offline = 0
+	online  = 1
+)
+
+// IsOnline returns true if healthcheck enabled and client is online
+func (c *Client) IsOnline() bool {
+	return !c.IsOffline()
+}
+
+// sets online healthStatus to offline
+func (c *Client) markOffline() {
+	atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
+}
+
+// IsOffline returns true if healthcheck enabled and client is offline
+func (c *Client) IsOffline() bool {
+	return atomic.LoadInt32(&c.healthStatus) == offline
+}
+
+// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
+// and an error if the health check is already started.
+func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
+	if atomic.LoadInt32(&c.healthStatus) == online {
+		return nil, fmt.Errorf("health check is running")
+	}
+	if hcDuration < 1*time.Second {
+		return nil, fmt.Errorf("health check duration should be at least 1 second")
+	}
+	ctx, cancelFn := context.WithCancel(context.Background())
+	atomic.StoreInt32(&c.healthStatus, online)
+	probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
+	go func(duration time.Duration) {
+		timer := time.NewTimer(duration)
+		defer timer.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				atomic.StoreInt32(&c.healthStatus, unknown)
+				return
+			case <-timer.C:
+				timer.Reset(duration)
+				// Do health check the first time and ONLY if the connection is marked offline
+				if c.IsOffline() {
+					gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
+					_, err := c.getBucketLocation(gctx, probeBucketName)
+					gcancel()
+					if IsNetworkOrHostDown(err, false) {
+						// Still network errors do not need to do anything.
+						continue
+					}
+					switch ToErrorResponse(err).Code {
+					case "NoSuchBucket", "AccessDenied", "":
+						atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
+					}
+				}
+			}
+		}
+	}(hcDuration)
+	return cancelFn, nil
+}
+
+// requestMetadata - is container for all the values to make a request.
+type requestMetadata struct {
+	// If set newRequest presigns the URL.
+	presignURL bool
+
+	// User supplied.
+	bucketName         string
+	objectName         string
+	queryValues        url.Values
+	customHeader       http.Header
+	extraPresignHeader http.Header
+	expires            int64
+
+	// Generated by our internal code.
+	bucketLocation   string
+	contentBody      io.Reader
+	contentLength    int64
+	contentMD5Base64 string // carries base64 encoded md5sum
+	contentSHA256Hex string // carries hex encoded sha256sum
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+	// Starts http dump.
+	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Filter out Signature field from Authorization header.
+	origAuth := req.Header.Get("Authorization")
+	if origAuth != "" {
+		req.Header.Set("Authorization", redactSignature(origAuth))
+	}
+
+	// Only display request header.
+	reqTrace, err := httputil.DumpRequestOut(req, false)
+	if err != nil {
+		return err
+	}
+
+	// Write request to trace output.
+	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+	if err != nil {
+		return err
+	}
+
+	// Only display response header.
+	var respTrace []byte
+
+	// For errors we make sure to dump response body as well.
+	if resp.StatusCode != http.StatusOK &&
+		resp.StatusCode != http.StatusPartialContent &&
+		resp.StatusCode != http.StatusNoContent {
+		respTrace, err = httputil.DumpResponse(resp, true)
+		if err != nil {
+			return err
+		}
+	} else {
+		respTrace, err = httputil.DumpResponse(resp, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Write response to trace output.
+	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+	if err != nil {
+		return err
+	}
+
+	// Ends the http dump.
+	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Returns success.
+	return nil
+}
+
+// do - execute http request.
+func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
+	defer func() {
+		if IsNetworkOrHostDown(err, false) {
+			c.markOffline()
+		}
+	}()
+
+	resp, err = c.httpClient.Do(req)
+	if err != nil {
+		// Handle this specifically for now until future Golang versions fix this issue properly.
+		if urlErr, ok := err.(*url.Error); ok {
+			if strings.Contains(urlErr.Err.Error(), "EOF") {
+				return nil, &url.Error{
+					Op:  urlErr.Op,
+					URL: urlErr.URL,
+					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+				}
+			}
+		}
+		return nil, err
+	}
+
+	// Response should never be nil; report an error if that's the case.
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return nil, errInvalidArgument(msg)
+	}
+
+	// If trace is enabled, dump http request and response,
+	// except when the traceErrorsOnly enabled and the response's status code is ok
+	if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
+		err = c.dumpHTTP(req, resp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return resp, nil
+}
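A hypothetical fragment showing how the tracing and health-check hooks above fit together; it assumes an existing *minio.Client named client and implied log, os and time imports:

// Dump redacted HTTP traffic while debugging.
client.TraceOn(os.Stderr)

// Probe the endpoint periodically; do() flips the client offline
// on network errors, and the probe flips it back online.
cancel, err := client.HealthCheck(5 * time.Second)
if err != nil {
	log.Fatal(err) // already running, or duration under one second
}
defer cancel() // stops the probe goroutine; status returns to unknown

if client.IsOnline() {
	// Safe to issue requests.
}

+
+// List of success status.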
+var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + if c.IsOffline() { + return nil, errors.New(c.endpointURL.String() + " is offline.") + } + + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + reqRetry := MaxRetry // Indicates how many times we can retry the request + + if metadata.contentBody != nil { + // Check if body is seekable then it is retryable. + bodySeeker, retryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + retryable = false + } + // Retry only when reader is seekable + if !retryable { + reqRetry = 1 + } + + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } + } + + // Create cancel context to control 'newRetryTimer' go routine. + retryCtx, cancel := context.WithCancel(ctx) + + // Indicate to our routine to exit cleanly upon return. + defer cancel() + + for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { + // Retry executes the following function body if request has an + // error until maxRetries have been exhausted, retry attempts are + // performed after waiting for a given period of time in a + // binomial fashion. + if retryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. + var req *http.Request + req, err = c.newRequest(ctx, method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + return nil, err + } + // Initiate the request. + res, err = c.do(req) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } + + // Retry the request + continue + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := ioutil.ReadAll(res.Body) + // res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = ioutil.NopCloser(errBodySeeker) + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = ioutil.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. + // + // Additionally we should only retry if bucketLocation and custom + // region is empty. 
+ if c.region == "" { + switch errResponse.Code { + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResponse.Region == "" { + // Region is empty we simply return the error. + return res, err + } + // Region is not empty figure out a way to + // handle this appropriately. + if metadata.bucketName != "" { + // Gather Cached location only if bucketName is present. + if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + } else { + // This is for ListBuckets() fallback. + if errResponse.Region != metadata.bucketLocation { + // Retry if the error response has a different region + // than the request we just made. + metadata.bucketLocation = errResponse.Region + continue // Retry + } + } + } + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + + // Return an error when retry is canceled or deadlined + if e := retryCtx.Err(); e != nil { + return nil, e + } + + return res, err +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = http.MethodPost + } + + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(ctx, metadata.bucketName) + if err != nil { + return nil, err + } + } + if location == "" { + location = getDefaultLocation(*c.endpointURL, c.region) + } + } + + // Look if target url supports virtual host. + // We explicitly disallow MakeBucket calls to not use virtual DNS style, + // since the resolution may fail. + isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, + isVirtualHost, metadata.queryValues) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + // Generate presign url if needed, return right here. 
+ if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if metadata.extraPresignHeader != nil { + if signerType.IsV2() { + return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") + } + for k, v := range metadata.extraPresignHeader { + req.Header.Set(k, v[0]) + } + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. + req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = ioutil.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: + // Streaming signature is used by default for a PUT object request. Additionally we also + // look if the initialized client is secure, if yes then we don't need to perform + // streaming signature. + req = signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + } + + // Return request. + return req, nil +} + +// set User agent. +func (c *Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. 
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, errTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } + } + } + + // Save scheme. + scheme := c.endpointURL.Scheme + + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { + host = "[" + h + "]" + } + } + } + + urlStr := scheme + "://" + host + "/" + + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } + } + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + + return url.Parse(urlStr) +} + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go new file mode 100644 index 00000000..b7d99c69 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -0,0 +1,256 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "sync" + + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(ctx, bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. +func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(ctx, bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { + if resp != nil { + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For access denied error, it could be an anonymous + // request. Move forward and let the top level callers + // succeed if possible based on their policy. 
+			switch errResp.Code {
+			case "NotImplemented":
+				if errResp.Server == "AmazonSnowball" {
+					return "snowball", nil
+				}
+			case "AuthorizationHeaderMalformed":
+				fallthrough
+			case "InvalidRegion":
+				fallthrough
+			case "AccessDenied":
+				if errResp.Region == "" {
+					return "us-east-1", nil
+				}
+				return errResp.Region, nil
+			}
+			return "", err
+		}
+	}
+
+	// Extract location.
+	var locationConstraint string
+	err = xmlDecoder(resp.Body, &locationConstraint)
+	if err != nil {
+		return "", err
+	}
+
+	location := locationConstraint
+	// An empty location means 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+	}
+
+	// Location can be 'EU'; convert it to the meaningful 'eu-west-1'.
+	if location == "EU" {
+		location = "eu-west-1"
+	}
+
+	// Return the location; the caller saves it into the cache.
+	return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
+	// Set location query.
+	urlValues := make(url.Values)
+	urlValues.Set("location", "")
+
+	// Set get bucket location always as path style.
+	targetURL := *c.endpointURL
+
+	// Strip port 80/443 from the host, the same way the makeTargetURL method in api.go does.
+	if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
+		if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
+			targetURL.Host = h
+			if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
+				targetURL.Host = "[" + h + "]"
+			}
+		}
+	}
+
+	isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName)
+
+	var urlStr string
+
+	// Virtual-host style is used here only for Aliyun OSS endpoints;
+	// Amazon & Google endpoints remain compatible with path style.
+	if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
+		urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
+	} else {
+		targetURL.Path = path.Join(bucketName, "") + "/"
+		targetURL.RawQuery = urlValues.Encode()
+		urlStr = targetURL.String()
+	}
+
+	// Get a new HTTP request for the method.
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set UserAgent for the request.
+	c.setUserAgent(req)
+
+	// Get credentials from the configured credentials provider.
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+	)
+
+	// If a custom signer is set, it overrides the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If the signerType returned by the credentials helper is anonymous,
+	// then do not sign, regardless of the signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	if signerType.IsAnonymous() {
+		return req, nil
+	}
+
+	if signerType.IsV2() {
+		// GetBucketLocation calls should always be path style.
+		isVirtualHost := false
+		req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+		return req, nil
+	}
+
+	// Set sha256 sum for signature calculation only with signature version '4'.
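+	// With V4 signing, X-Amz-Content-Sha256 must always be sent: over TLS the
+	// payload hash can be skipped by sending the literal "UNSIGNED-PAYLOAD",
+	// while over plain HTTP the SHA-256 of the empty body is used, so the
+	// signature still covers the (empty) payload.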
+ contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + return req, nil +} diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md similarity index 67% rename from vendor/go.uber.org/zap/CODE_OF_CONDUCT.md rename to vendor/github.com/minio/minio-go/v7/code_of_conduct.md index e327d9aa..cb232c3c 100644 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md @@ -4,10 +4,10 @@ In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. ## Our Standards @@ -35,30 +35,33 @@ Examples of unacceptable behavior by participants include: Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. +response to any instances of unacceptable behavior, in compliance with the +licensing terms applying to the Project developments. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. +threatening, offensive, or harmful. However, these actions shall respect the +licensing terms of the Project Developments that will always supersede such +Code of Conduct. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. +reported by contacting the project team at dev@min.io. The project team +will review and investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. 
The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good @@ -67,9 +70,11 @@ members of the project's leadership. ## Attribution -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +This version includes a clarification to ensure that the code of conduct is in +compliance with the free software licensing terms of the project. [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go new file mode 100644 index 00000000..dee83b87 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -0,0 +1,101 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Multipart upload defaults. + +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 16MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 16 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5 + +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// Total number of parallel workers used for multipart operation. +const totalWorkers = 4 + +// Signature related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +const ( + // Storage class header. 
+	amzStorageClass = "X-Amz-Storage-Class"
+
+	// Website redirect location header
+	amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
+
+	// Object Tagging headers
+	amzTaggingHeader          = "X-Amz-Tagging"
+	amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
+
+	amzVersionID         = "X-Amz-Version-Id"
+	amzTaggingCount      = "X-Amz-Tagging-Count"
+	amzExpiration        = "X-Amz-Expiration"
+	amzRestore           = "X-Amz-Restore"
+	amzReplicationStatus = "X-Amz-Replication-Status"
+	amzDeleteMarker      = "X-Amz-Delete-Marker"
+
+	// Object legal hold header
+	amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
+
+	// Object retention headers
+	amzLockMode         = "X-Amz-Object-Lock-Mode"
+	amzLockRetainUntil  = "X-Amz-Object-Lock-Retain-Until-Date"
+	amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
+
+	// Replication status
+	amzBucketReplicationStatus = "X-Amz-Replication-Status"
+	// MinIO-specific replication/lifecycle transition extension
+	minIOBucketSourceMTime = "X-Minio-Source-Mtime"
+
+	minIOBucketSourceETag              = "X-Minio-Source-Etag"
+	minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
+	minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
+	minIOBucketReplicationRequest      = "X-Minio-Source-Replication-Request"
+	// Header indicates last tag update time on source
+	minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
+	// Header indicates last retention update time on source
+	minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp"
+	// Header indicates last legalhold update time on source
+	minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp"
+
+	minIOForceDelete = "x-minio-force-delete"
+)
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
new file mode 100644
index 00000000..c2a90239
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -0,0 +1,127 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+	*Client
+}
+
+// NewCore - Returns a new initialized Core client. This client should be used
+// only under special conditions, such as needing access to lower-level
+// primitives in order to write your own wrappers on top of them.
+func NewCore(endpoint string, opts *Options) (*Core, error) {
+	var s3Client Core
+	client, err := New(endpoint, opts)
+	if err != nil {
+		return nil, err
+	}
+	s3Client.Client = client
+	return &s3Client, nil
+}
+
+// ListObjects - Lists all the objects at a prefix; marker and delimiter can
+// optionally be supplied to further filter the results.
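+//
+// A minimal usage sketch (the receiver, bucket, prefix, and delimiter values
+// are illustrative only):
+//
+//	result, err := core.ListObjects("my-bucket", "photos/", "", "/", 100)
+//	if err == nil {
+//		for _, obj := range result.Contents {
+//			fmt.Println(obj.Key)
+//		}
+//	}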
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+	return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to support iteration over the results.
+func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+	return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil)
+}
+
+// CopyObject - copies an object from a source object to a destination object on the server side.
+func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
+	return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+	partID int, startOffset, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
+	return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
+		partID, startOffset, length, metadata)
+}
+
+// PutObject - Uploads an object using a single PUT call.
+func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
+	hookReader := newHook(data, opts.Progress)
+	return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
+}
+
+// NewMultipartUpload - Initiates a new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+	result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
+	return result.UploadID, err
+}
+
+// ListMultipartUploads - Lists incomplete uploads.
+func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+	return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Uploads an object part.
+func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+	return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
+}
+
+// ListObjectParts - Lists the uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
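+//
+// A minimal sketch of the full low-level multipart flow (error handling is
+// elided; ctx, data, and size are assumed to be defined by the caller):
+//
+//	uploadID, _ := core.NewMultipartUpload(ctx, "bucket", "object", PutObjectOptions{})
+//	part, _ := core.PutObjectPart(ctx, "bucket", "object", uploadID, 1, data, size, "", "", nil)
+//	parts := []CompletePart{{PartNumber: 1, ETag: part.ETag}}
+//	etag, _ := core.CompleteMultipartUpload(ctx, "bucket", "object", uploadID, parts, PutObjectOptions{})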
+func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) { + res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ + Parts: parts, + }, opts) + return res.ETag, err +} + +// AbortMultipartUpload - Abort an incomplete upload. +func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { + return c.abortMultipartUpload(ctx, bucket, object, uploadID) +} + +// GetBucketPolicy - fetches bucket access policy for a given bucket. +func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { + return c.getBucketPolicy(ctx, bucket) +} + +// PutBucketPolicy - applies a new bucket access policy for a given bucket. +func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { + return c.putBucketPolicy(ctx, bucket, bucketPolicy) +} + +// GetObject is a lower level API implemented to support reading +// partial objects and also downloading objects with special conditions +// matching etag, modtime etc. +func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + return c.getObject(ctx, bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go new file mode 100644 index 00000000..59f347ef --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -0,0 +1,12225 @@ +//go:build mint +// +build mint + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/dustin/go-humanize"
+	jsoniter "github.com/json-iterator/go"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/notification"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+const (
+	serverEndpoint = "SERVER_ENDPOINT"
+	accessKey      = "ACCESS_KEY"
+	secretKey      = "SECRET_KEY"
+	enableHTTPS    = "ENABLE_HTTPS"
+	enableKMS      = "ENABLE_KMS"
+)
+
+type mintJSONFormatter struct{}
+
+func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+	data := make(log.Fields, len(entry.Data))
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON: %w", err)
+	}
+	return append(serialized, '\n'), nil
+}
+
+// readFull reads into buf until it is full or an error occurs; a short read
+// followed by EOF is reported as io.ErrUnexpectedEOF.
+func readFull(r io.Reader, buf []byte) (n int, err error) {
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+func cleanEmptyEntries(fields log.Fields) log.Fields {
+	cleanFields := log.Fields{}
+	for k, v := range fields {
+		if v != "" {
+			cleanFields[k] = v
+		}
+	}
+	return cleanFields
+}
+
+// log successful test runs
+func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// A few of the features are not available in Gateway(s) currently, so check if the err value is NotImplemented,
+// and log it as NA in that case and continue execution. Otherwise log it as a failure and return.
+func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
+	// If the server returns NotImplemented we assume it is gateway mode, so log it as info and move on to the next tests.
+	// Special case for the ComposeObject API: it is implemented on the client side and adds specific error details like `Error in upload-part-copy` in
+	// addition to the NotImplemented error returned from the server.
+	if isErrNotImplemented(err) {
+		ignoredLog(testName, function, args, startTime, message).Info()
+	} else if isRunOnFail() {
+		failureLog(testName, function, args, startTime, alert, message, err).Error()
+	} else {
+		failureLog(testName, function, args, startTime, alert, message, err).Fatal()
+	}
+}
+
+// log failed test runs
+func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	var fields log.Fields
+	// log with the fields as per mint
+	if err != nil {
+		fields = log.Fields{
+			"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err,
+		}
+	} else {
+		fields = log.Fields{
+			"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message,
+		}
+	}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// log not applicable test runs
+func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
+	// calculate the test case duration
+	duration :=
time.Since(startTime) + // log with the fields as per mint + fields := log.Fields{ + "name": "minio-go: " + testName, "function": function, "args": args, + "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", + } + return log.WithFields(cleanEmptyEntries(fields)) +} + +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. + defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + return err + } + return err +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) + defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + log.Println("found", obj.Key, obj.VersionID) + } + return err + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == "NotImplemented" +} + +func isRunOnFail() bool { + return os.Getenv("RUN_ON_FAIL") == "1" +} + +func init() { + // If server endpoint is not set, all tests default to + // using https://play.min.io + if os.Getenv(serverEndpoint) == "" { + os.Setenv(serverEndpoint, "play.min.io") + os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") + os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") + os.Setenv(enableHTTPS, "1") + } +} + +var mintDataDir = os.Getenv("MINT_DATA_DIR") + +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return + } + return filepath.Join(mintDataDir, filename) +} + +func newRandomReader(seed, size int64) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(seed)), size) +} + +func mustCrcReader(r io.Reader) uint32 { + crc := 
crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + return crc.Sum32() +} + +func crcMatches(r io.Reader, want uint32) error { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +func crcMatchesName(r io.Reader, name string) error { + want := dataFileCRC32[name] + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +// read data from file if it exists or optionally create a buffer of particular size +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := int64(dataFileMap[fileName]) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) + } + return ioutil.NopCloser(newRandomReader(size, size)) + } + reader, _ := os.Open(getMintDataDirFilePath(fileName)) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(reader) + reader.Close() + reader, _ = os.Open(getMintDataDirFilePath(fileName)) + } + return reader +} + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-0-b": 0, + "datafile-1-b": 1, + "datafile-1-kB": 1 * humanize.KiByte, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +var dataFileCRC32 = map[string]uint32{} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
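+	// In minio-go v7 the constructor takes the endpoint plus an Options
+	// struct; credentials and the TLS flag travel in Options rather than
+	// as positional arguments, as below.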
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. 
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testListObjectVersions() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjectVersions(bucketName, prefix, recursive)" + args := map[string]interface{}{ + "bucketName": "", + "prefix": "", + "recursive": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
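+	// Object locking can only be requested at bucket creation time and it
+	// requires versioning, which is why versioning is also enabled
+	// explicitly right after the bucket is made.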
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected object deletion", err) + return + } + + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testGetObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based 
on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = append(buffers, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + 
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err)
+			return
+		}
+
+		tmpBuffer := bytes.NewBuffer([]byte{})
+		_, err = io.Copy(tmpBuffer, reader)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "unexpected io.Copy()", err)
+			return
+		}
+
+		if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) {
+			logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err)
+			return
+		}
+	}
+
+	// Delete all objects and their versions as well as the bucket itself.
+	if err = cleanupVersionedBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testPutObjectWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	const n = 10
+	// Read the inputs, then save the data concurrently.
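+	// Each goroutine records its result in errs[i]; collecting errors by
+	// index rather than sharing a single error variable keeps the
+	// concurrent writes race-free.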
+ var wg sync.WaitGroup + wg.Add(n) + buffers := make([][]byte, n) + var errs [n]error + for i := 0; i < n; i++ { + r := newRandomReader(int64((1<<20)*i+i), int64(i)) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + buffers[i] = buf + + go func(i int) { + defer wg.Done() + _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.Slice(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.Slice(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if 
len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testConcurrentCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts 
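+	// Note: VersionID in CopySrcOptions pins the server-side copy to that
+	// specific version rather than the latest one.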
+ + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy concurrently + const n = 10 + var wg sync.WaitGroup + wg.Add(n) + var errs [n]error + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) + infos = []minio.ObjectInfo{} + for info := range objectsInfo { + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + infos = append(infos, info) + } + + if len(infos) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
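+	// Object locking can only be requested when the bucket is created, and it
+	// relies on versioning, which is enabled explicitly just below.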
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
+	var testFilesBytes [][]byte
+
+	for _, testFile := range testFiles {
+		r := getDataReader(testFile)
+		buf, err := ioutil.ReadAll(r)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "unexpected failure", err)
+			return
+		}
+		r.Close()
+		_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+		testFilesBytes = append(testFilesBytes, buf)
+	}
+
+	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+	var results []minio.ObjectInfo
+	for info := range objectsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+			return
+		}
+		results = append(results, info)
+	}
+
+	sort.SliceStable(results, func(i, j int) bool {
+		return results[i].Size > results[j].Size
+	})
+
+	// Source objects to concatenate, each pinned to a specific version.
+	src1 := minio.CopySrcOptions{
+		Bucket:    bucketName,
+		Object:    objectName,
+		VersionID: results[0].VersionID,
+	}
+
+	src2 := minio.CopySrcOptions{
+		Bucket:    bucketName,
+		Object:    objectName,
+		VersionID: results[1].VersionID,
+	}
+
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName,
+		Object: objectName + "-copy",
+	}
+
+	_, err = c.ComposeObject(context.Background(), dst, src1, src2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+		return
+	}
+
+	// Destination object
+	readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err)
+		return
+	}
+	defer readerCopy.Close()
+
+	copyContentBytes, err := ioutil.ReadAll(readerCopy)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
+		return
+	}
+
+	var expectedContent []byte
+	for _, fileBytes := range testFilesBytes {
+		expectedContent = append(expectedContent, fileBytes...)
+ } + + if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var version minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + version = info + break + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectsWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObjects()" + args := 
map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	objectsVersions := make(chan minio.ObjectInfo)
+	go func() {
+		// Close the channel on every exit path so RemoveObjects below cannot
+		// block forever if listing fails.
+		defer close(objectsVersions)
+		objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
+			minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+		for info := range objectsVersionsInfo {
+			if info.Err != nil {
+				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+				return
+			}
+			objectsVersions <- info
+		}
+	}()
+
+	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
+	for e := range removeErrors {
+		if e.Err != nil {
+			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
+			return
+		}
+	}
+
+	objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	for range objectsVersionsInfo {
+		logError(testName, function, args, startTime, "", "Unexpected version listed, the bucket should be empty", nil)
+		return
+	}
+
+	err = c.RemoveBucket(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testObjectTaggingWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "{Get,Set,Remove}ObjectTagging()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
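+	// As in the other tests, a fresh client is constructed so each test run
+	// stays independent.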
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	for _, file := range []string{"datafile-1-b", "datafile-10-kB"} {
+		_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObject failed", err)
+			return
+		}
+	}
+
+	versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+
+	var versions []minio.ObjectInfo
+	for info := range versionsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+			return
+		}
+		versions = append(versions, info)
+	}
+
+	sort.SliceStable(versions, func(i, j int) bool {
+		return versions[i].Size < versions[j].Size
+	})
+
+	tagsV1 := map[string]string{"key1": "val1"}
+	t1, err := tags.MapToObjectTags(tagsV1)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MapToObjectTags (1) failed", err)
+		return
+	}
+
+	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err)
+		return
+	}
+
+	tagsV2 := map[string]string{"key2": "val2"}
+	t2, err := tags.MapToObjectTags(tagsV2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MapToObjectTags (2) failed", err)
+		return
+	}
+
+	err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err)
+		return
+	}
+
+	// Report a mismatch when a key of tags1 is missing from tags2 or carries
+	// a different value.
+	tagsEqual := func(tags1, tags2 map[string]string) bool {
+		for k1, v1 := range tags1 {
+			v2, found := tags2[k1]
+			if !found || v1 != v2 {
+				return false
+			}
+		}
+		return true
+	}
+
+	gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) {
+		logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err)
+		return
+	}
+
+	gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) {
+		logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err)
+		return
+	}
+
+	err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "RemoveObjectTagging failed", err)
+		return
+	}
+
+	emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName,
+		minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObjectTagging failed", err)
+		return
+	}
+
+	if len(emptyTags.ToMap()) != 0 {
+		logError(testName, function, args, startTime, "", "Unexpected tags content (3)", nil)
+		return
+	}
+
+	// Delete all objects and their versions as well as the bucket itself
+	if err = cleanupVersionedBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test PutObject using large data to trigger the multipart ReadAt path
+func testPutObjectWithMetadata() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+	}
+
+	if !isFullMode() {
+		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Object custom metadata
+	customContentType := "custom/contenttype"
+
+	args["metadata"] = map[string][]string{
+		"Content-Type":         {customContentType},
+		"X-Amz-Meta-CustomKey": {"extra spaces in value"},
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+		ContentType: customContentType,
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+strconv.Itoa(bufSize)+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+	if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
+		return
+	}
+	if err := crcMatchesName(r, "datafile-129-MB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testPutObjectWithContentLanguage() {
+	// initialize logging params
+	objectName := "test-object"
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": objectName,
+		"size":       -1,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	// Make a new bucket.
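+	// All buckets in these tests are created in "us-east-1", the default S3
+	// region.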
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + data := []byte{} + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := newRandomReader(size, size) + ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) + return + } + + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object seeker from the end, using whence set to '2'. 
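+// (A whence of '2' corresponds to io.SeekEnd: the offset is relative to the
+// end of the object.)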
+func testGetObjectSeekEnd() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+strconv.Itoa(bufSize)+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+
+	pos, err := r.Seek(-100, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Object Seek failed", err)
+		return
+	}
+	if pos != st.Size-100 {
+		logError(testName, function, args, startTime, "", "Incorrect position", err)
+		return
+	}
+	buf2 := make([]byte, 100)
+	m, err := readFull(r, buf2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error reading through readFull", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", "Number of bytes don't match, expected "+strconv.Itoa(len(buf2))+" got "+strconv.Itoa(m), err)
+		return
+	}
+	hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+	hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+	if hexBuf1 != hexBuf2 {
+		logError(testName, function, args, startTime, "", "Values at same index don't match", err)
+		return
+	}
+	// Seek again to verify the position is stable after the read.
+	pos, err = r.Seek(-100, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Object Seek failed", err)
+		return
+	}
+	if pos != st.Size-100 {
+		logError(testName, function, args, startTime, "", "Incorrect position", err)
+		return
+	}
+	if err = r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwice() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+strconv.Itoa(bufSize)+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test RemoveObjects request where context cancels after timeout
+func testRemoveObjectsContext() {
+	// Initialize logging params.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(ctx, bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects. + nrObjects := 20 + objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. 
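+	// S3 caps bucket names at 63 characters; randString keeps the generated
+	// name within that limit.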
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 200 objects
+	nrObjects := 200
+
+	objectsCh := make(chan minio.ObjectInfo)
+
+	go func() {
+		defer close(objectsCh)
+		// Upload objects and send them to objectsCh
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+				minio.PutObjectOptions{ContentType: "application/octet-stream"})
+			if err != nil {
+				logError(testName, function, args, startTime, "", "PutObject failed", err)
+				continue
+			}
+			objectsCh <- minio.ObjectInfo{
+				Key:       info.Key,
+				VersionID: info.VersionID,
+			}
+		}
+	}()
+
+	// Call RemoveObjects API
+	errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+	// Check if errorCh doesn't receive any error
+	select {
+	case r, more := <-errorCh:
+		if more {
+			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test removing multiple objects and check for results
+func testRemoveMultipleObjectsWithResult() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
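+	// Locking is enabled at creation so that retention can be applied to some
+	// of the uploaded objects below, making their removal fail as expected.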
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupVersionedBucket(bucketName, c) + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + nrObjects := 10 + nrLockedObjects := 5 + + objectsCh := make(chan minio.ObjectInfo) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if i < nrLockedObjects { + // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC) + t := time.Now().Add(5 * time.Minute) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + VersionID: info.VersionID, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + } + + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + + // Call RemoveObjects API + resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + var foundNil, foundErr int + + for { + // Check if errorCh doesn't receive any error + select { + case deleteRes, ok := <-resultCh: + if !ok { + goto out + } + if deleteRes.ObjectName == "" { + logError(testName, function, args, startTime, "", "Unexpected object name", nil) + return + } + if deleteRes.ObjectVersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected object version ID", nil) + return + } + + if deleteRes.Err == nil { + foundNil++ + } else { + foundErr++ + } + } + } +out: + if foundNil+foundErr != nrObjects { + logError(testName, function, args, startTime, "", "Unexpected number of results", nil) + return + } + + if foundNil != nrObjects-nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil) + return + } + + if foundErr != nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of errors", nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject of a big file to trigger multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
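+	// SetAppInfo appends the given application name/version to the User-Agent
+	// header sent with every request.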
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Use a 129 MiB file so the upload spans multiple parts.
+	fileName := getMintDataDirFilePath("datafile-129-MB")
+	if fileName == "" {
+		// Make a temp file with minPartSize bytes of data.
+		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+			return
+		}
+		// Copy 129 MiB of data into the temp file.
+		if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File Close failed", err)
+			return
+		}
+		fileName = file.Name()
+		args["fileName"] = fileName
+	}
+	totalSize := dataFileMap["datafile-129-MB"]
+	// Set base object name
+	objectName := bucketName + "FPutObject" + "-standard"
+	args["objectName"] = objectName
+
+	objectContentType := "testapplication/octet-stream"
+	args["objectContentType"] = objectContentType
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	_, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Unexpected error", err)
+		return
+	}
+	if objInfo.Size != int64(totalSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+strconv.Itoa(totalSize)+" got "+strconv.FormatInt(objInfo.Size, 10), err)
+		return
+	}
+	if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FPutObject(bucketName, objectName, fileName, opts)"
+
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + + // Make a new bucket. + args["bucketName"] = bucketName + args["location"] = location + function = "MakeBucket(bucketName, location)" + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-129-MB") + if fName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. 
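+		// (Windows cannot remove a file that is still open, hence the explicit
+		// Close before the deferred os.Remove.)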
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File close failed", err)
+			return
+		}
+		defer os.Remove(file.Name())
+		fName = file.Name()
+	}
+
+	// Set base object name
+	function = "FPutObject(bucketName, objectName, fileName, opts)"
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName + "-standard"
+	args["fileName"] = fName
+	args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
+		logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	args["objectName"] = objectName + "-Octet"
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	srcFile, err := os.Open(fName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File open failed", err)
+		return
+	}
+	defer srcFile.Close()
+	// Add extension to temp file name
+	tmpFile, err := os.Create(fName + ".gtar")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File create failed", err)
+		return
+	}
+	_, err = io.Copy(tmpFile, srcFile)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File copy failed", err)
+		return
+	}
+	tmpFile.Close()
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	args["objectName"] = objectName + "-GTar"
+	args["opts"] = minio.PutObjectOptions{}
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	// Check headers
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-standard"
+	rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rStandard.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+		return
+	}
+
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-Octet"
+	rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
+		return
+	}
+
+	function = "StatObject(bucketName, objectName, opts)"
+	args["objectName"] = objectName + "-GTar"
+	rGTar, err :=
c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) + return + } + + os.Remove(fName + ".gtar") + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. 
+ if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. 
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + cancel() + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object with s3zip extensions. +func testGetObjectS3Zip() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{"x-minio-extract": true} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" + args["objectName"] = objectName + + var zipFile bytes.Buffer + zw := zip.NewWriter(&zipFile) + rng := rand.New(rand.NewSource(0xc0cac01a)) + const nFiles = 500 + for i := 0; i <= nFiles; i++ { + if i == nFiles { + // Make one large, compressible file. 
+			i = 1000000
+		}
+		b := make([]byte, i)
+		if i < nFiles {
+			rng.Read(b)
+		}
+		wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
+		if err != nil {
+			logError(testName, function, args, startTime, "", "zw.Create failed", err)
+			return
+		}
+		wc.Write(b)
+	}
+	err = zw.Close()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "zw.Close failed", err)
+		return
+	}
+	buf := zipFile.Bytes()
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat object failed", err)
+		return
+	}
+
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", len(buf), st.Size), err)
+		return
+	}
+	r.Close()
+
+	zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
+		return
+	}
+	lOpts := minio.ListObjectsOptions{}
+	lOpts.Set("x-minio-extract", "true")
+	lOpts.Prefix = objectName + "/"
+	lOpts.Recursive = true
+	list := c.ListObjects(context.Background(), bucketName, lOpts)
+	listed := map[string]minio.ObjectInfo{}
+	for item := range list {
+		if item.Err != nil {
+			break
+		}
+		listed[item.Key] = item
+	}
+	if len(listed) == 0 {
+		// Assume we are running against a non-MinIO server.
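+		// (The "x-minio-extract" ListObjects extension is MinIO-specific; other
+		// S3-compatible servers return an empty listing here, so the test is skipped.)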
+ args["SKIPPED"] = true + ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() + return + } + + for _, file := range zr.File { + if file.FileInfo().IsDir() { + continue + } + args["zipfile"] = file.Name + zfr, err := file.Open() + if err != nil { + logError(testName, function, args, startTime, "", "file.Open failed", err) + return + } + want, err := ioutil.ReadAll(zfr) + if err != nil { + logError(testName, function, args, startTime, "", "fzip file read failed", err) + return + } + + opts := minio.GetObjectOptions{} + opts.Set("x-minio-extract", "true") + key := path.Join(objectName, file.Name) + r, err = c.GetObject(context.Background(), bucketName, key, opts) + if err != nil { + terr := minio.ToErrorResponse(err) + if terr.StatusCode != http.StatusNotFound { + logError(testName, function, args, startTime, "", "GetObject failed", err) + } + return + } + got, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + r.Close() + if !bytes.Equal(want, got) { + logError(testName, function, args, startTime, "", "Content mismatch", err) + return + } + oi, ok := listed[key] + if !ok { + logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) + return + } + if int(oi.Size) != len(got) { + logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) + return + } + delete(listed, key) + } + delete(args, "zipfile") + if len(listed) > 0 { + logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 33K of data. 
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat object failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	// This following function helps us to compare data from the reader after seek
+	// with the data from the original buffer
+	cmpData := func(r io.Reader, start, end int) {
+		if end-start == 0 {
+			return
+		}
+		buffer := bytes.NewBuffer([]byte{})
+		if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+			if err != io.EOF {
+				logError(testName, function, args, startTime, "", "CopyN failed", err)
+				return
+			}
+		}
+		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+			logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+			return
+		}
+	}
+
+	// Generic seek error for errors other than io.EOF
+	seekErr := errors.New("seek error")
+
+	testCases := []struct {
+		offset    int64
+		whence    int
+		pos       int64
+		err       error
+		shouldCmp bool
+		start     int
+		end       int
+	}{
+		// Start from offset 0, fetch data and compare
+		{0, 0, 0, nil, true, 0, 0},
+		// Start from offset 2048, fetch data and compare
+		{2048, 0, 2048, nil, true, 2048, bufSize},
+		// Start from offset larger than possible
+		{int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+		// Move to offset 0 without comparing
+		{0, 0, 0, nil, false, 0, 0},
+		// Move one step forward and compare
+		{1, 1, 1, nil, true, 1, bufSize},
+		// Move larger than possible
+		{int64(bufSize), 1, 0, seekErr, false, 0, 0},
+		// Provide negative offset with CUR_SEEK
+		{int64(-1), 1, 0, seekErr, false, 0, 0},
+		// Test with whence SEEK_END and with positive offset
+		{1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+		// Test with whence SEEK_END and with negative offset
+		{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+		// Test with whence SEEK_END and with large negative offset
+		{-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+	}
+
+	for i, testCase := range testCases {
+		// Perform seek operation
+		n, err := r.Seek(testCase.offset, testCase.whence)
+		// We expect an error
+		if testCase.err == seekErr && err == nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
+			return
+		}
+		// We expect a specific error
+		if testCase.err != seekErr && testCase.err != err {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
+			return
+		}
+		// If we expect an error go to the next loop
+		if testCase.err != nil {
+			continue
+		}
+		// Check the returned seek pos
+		if n != testCase.pos {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+			return
+		}
+		// Compare only if shouldCmp is activated
+		if testCase.shouldCmp {
+			cmpData(r, testCase.start, testCase.end)
+		}
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, len(buf))
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "GetObject data does not match what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, len(buf)+1)
+	// Read the whole object and beyond.
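+	// Per the io.ReaderAt contract, a read that extends past the end of the object
+	// returns io.EOF along with the bytes that were available.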
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Reproduces issue https://github.com/minio/minio-go/issues/1137
+func testGetObjectReadAtWhenEOFWasReached() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// read directly
+	buf1 := make([]byte, len(buf))
+	buf2 := make([]byte, 512)
+
+	m, err := r.Read(buf1)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Read read fewer bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf) {
+		logError(testName, function, args, startTime, "", "Read data does not match what was previously uploaded", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, 512)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[512:1024]) {
+		logError(testName, function, args, startTime, "", "ReadAt data does not match what was previously uploaded", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PresignedPostPolicy(policy)"
+	args := map[string]interface{}{
+		"policy": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	// Azure requires the key to not start with a number
+	metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
+	metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	policy := minio.NewPostPolicy()
+
+	if err := policy.SetBucket(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetKey(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetKeyStartsWith(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+		logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentType(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentTypeStartsWith(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetUserMetadata("", ""); err == nil {
+		logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+		return
+	}
+
+	policy.SetBucket(bucketName)
+	policy.SetKey(objectName)
+	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+	policy.SetContentType("binary/octet-stream")
+	policy.SetContentLengthRange(10, 1024*1024)
+	policy.SetUserMetadata(metadataKey, metadataValue)
+	args["policy"] = policy.String()
+
+	presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
+		return
+	}
+
+	var formBuf bytes.Buffer
+	writer := multipart.NewWriter(&formBuf)
+	for k, v := range formData {
+		writer.WriteField(k, v)
+	}
+
+	// Get a 33KB file to upload and test if set post policy works
+	filePath := getMintDataDirFilePath("datafile-33-kB")
+	if filePath == "" {
+		// Make a temp file with 33 KB of data.
+		file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+			return
+		}
+		if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File Close failed", err)
+			return
+		}
+		filePath = file.Name()
+	}
+
+	// add file to post request
+	f, err := os.Open(filePath)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File open failed", err)
+		return
+	}
+	defer f.Close()
+	w, err := writer.CreateFormFile("file", filePath)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
+		return
+	}
+
+	_, err = io.Copy(w, f)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Copy failed", err)
+		return
+	}
+	writer.Close()
+
+	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
+		return
+	}
+
+	httpClient := &http.Client{
+		// Client.Timeout bounds the entire request, not just the response
+		// headers; the request is proactively canceled after 30 seconds
+		// with no response.
+ Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName + + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. 
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Stat the source object to capture its ETag for the copy conditions.
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	// Copy Source
+	src := minio.CopySrcOptions{
+		Bucket: bucketName,
+		Object: objectName,
+		// Set copy conditions.
+		MatchETag:          objInfo.ETag,
+		MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+	}
+	args["src"] = src
+
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName + "-copy",
+		Object: objectName + "-copy",
+	}
+
+	// Perform the Copy
+	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Source object
+	r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// Destination object
+	readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err)
+		return
+	}
+
+	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
+		return
+	}
+	// Close all the get readers before proceeding with CopyObject operations.
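+	// An open minio.Object holds its underlying GET stream; closing the readers
+	// first avoids copying while reads on the same object are still in flight.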
+ r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } + args["dst"] = dst + args["src"] = src + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(oi.ETag) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderSeeker interface methods. +func testSSECEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. +func testSSES3EncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderAt interface methods. +func testSSECEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
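+	// SSE-C objects can only be read back with the same customer-provided key used
+	// at upload; the GetObject call below supplies the matching DefaultPBKDF key.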
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, len(buf))
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "GetObject returned different data than what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, len(buf)+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests SSE-S3 get object ReaderAt interface methods.
+func testSSES3EncryptedGetObjectReadAtFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 129MiB of data.
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.NewSSE(),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, len(buf))
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "GetObject returned different data than what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, len(buf)+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// testSSECEncryptionPutGet tests encryption with customer provided encryption keys
+func testSSECEncryptionPutGet() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"sse":        "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	testCases := []struct {
+		buf []byte
+	}{
+		{buf: bytes.Repeat([]byte("F"), 1)},
+		{buf: bytes.Repeat([]byte("F"), 15)},
+		{buf: bytes.Repeat([]byte("F"), 16)},
+		{buf: bytes.Repeat([]byte("F"), 17)},
+		{buf: bytes.Repeat([]byte("F"), 31)},
+		{buf: bytes.Repeat([]byte("F"), 32)},
+		{buf: bytes.Repeat([]byte("F"), 33)},
+		{buf: bytes.Repeat([]byte("F"), 1024)},
+		{buf: bytes.Repeat([]byte("F"), 1024*2)},
+		{buf: bytes.Repeat([]byte("F"), 1024*1024)},
+	}
+
+	const password = "correct horse battery staple" // https://xkcd.com/936/
+
+	for i, testCase := range testCases {
+		// Generate a random object name
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		// Secured object
+		sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+		args["sse"] = sse
+
+		// Put encrypted data
+		_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+			return
+		}
+
+		// Read the data back
+		r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+			return
+		}
+		defer r.Close()
+
+		// Compare the sent object with the received one
+		recvBuffer := bytes.NewBuffer([]byte{})
+		if _, err = io.Copy(recvBuffer, r); err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+			return
+		}
+		if recvBuffer.Len() != len(testCase.buf) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+			return
+		}
+		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+			return
+		}
+
+		successLogger(testName, function, args, startTime).Info()
+
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// testSSECEncryptionFPut tests encryption with customer specified encryption keys
+func testSSECEncryptionFPut() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
+	args := map[string]interface{}{
+		"bucketName":  "",
+		"objectName":  "",
+		"filePath":    "",
+		"contentType": "",
+		"sse":         "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Object custom metadata
+	customContentType := "custom/contenttype"
+	args["metadata"] = customContentType
+
+	testCases := []struct {
+		buf []byte
+	}{
+		{buf: bytes.Repeat([]byte("F"), 0)},
+		{buf: bytes.Repeat([]byte("F"), 1)},
+		{buf: bytes.Repeat([]byte("F"), 15)},
+		{buf: bytes.Repeat([]byte("F"), 16)},
+		{buf: bytes.Repeat([]byte("F"), 17)},
+		{buf: bytes.Repeat([]byte("F"), 31)},
+		{buf: bytes.Repeat([]byte("F"), 32)},
+		{buf: bytes.Repeat([]byte("F"), 33)},
+		{buf: bytes.Repeat([]byte("F"), 1024)},
+		{buf: bytes.Repeat([]byte("F"), 1024*2)},
+		{buf: bytes.Repeat([]byte("F"), 1024*1024)},
+	}
+
+	const password = "correct horse battery staple" // https://xkcd.com/936/
+	for i, testCase := range testCases {
+		// Generate a random object name
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		args["objectName"] = objectName
+
+		// Secured object
+		sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+		args["sse"] = sse
+
+		// Generate a random file name.
+		fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		file, err := os.Create(fileName)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file create failed", err)
+			return
+		}
+		_, err = file.Write(testCase.buf)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file write failed", err)
+			return
+		}
+		file.Close()
+		// Put encrypted data
+		if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+			logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+			return
+		}
+
+		// Read the data back
+		r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+			return
+		}
+		defer r.Close()
+
+		// Compare the sent object with the received one
+		recvBuffer := bytes.NewBuffer([]byte{})
+		if _, err = io.Copy(recvBuffer, r); err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+			return
+		}
+		if recvBuffer.Len() != len(testCase.buf) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+			return
+		}
+		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+			return
+		}
+
+		os.Remove(fileName)
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// testSSES3EncryptionPutGet tests SSE-S3 encryption
+func testSSES3EncryptionPutGet() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
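+	// Editor's note (illustrative): the byte-count table used by these
+	// encryption tests (1, 15, 16, 17, 31, 32, 33, ...) appears chosen to
+	// straddle 16-byte AES block boundaries, so both partial-block and
+	// exact-block payloads round-trip through the encryption path.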
function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back without any encryption headers + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestSSES3EncryptionFPut tests server side encryption +func testSSES3EncryptionFPut() { + // initialize logging params + startTime 
:= time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Generate a random file name. 
+		fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+		file, err := os.Create(fileName)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file create failed", err)
+			return
+		}
+		_, err = file.Write(testCase.buf)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file write failed", err)
+			return
+		}
+		file.Close()
+		// Put encrypted data
+		if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
+			logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+			return
+		}
+
+		// Read the data back
+		r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+			return
+		}
+		defer r.Close()
+
+		// Compare the sent object with the received one
+		recvBuffer := bytes.NewBuffer([]byte{})
+		if _, err = io.Copy(recvBuffer, r); err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err)
+			return
+		}
+		if recvBuffer.Len() != len(testCase.buf) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
+			return
+		}
+		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err)
+			return
+		}
+
+		os.Remove(fileName)
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testBucketNotification() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "SetBucketNotification(bucketName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	if os.Getenv("NOTIFY_BUCKET") == "" ||
+		os.Getenv("NOTIFY_SERVICE") == "" ||
+		os.Getenv("NOTIFY_REGION") == "" ||
+		os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+		os.Getenv("NOTIFY_RESOURCE") == "" {
+		ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + config := notification.Configuration{} + config.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(context.Background(), bucketName, config) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + config, err = c.GetBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. 
+ function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + + defer cleanupBucket(bucketName, c) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. 
+ function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjects()" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. 
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. 
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+		"reqParams":  reqParams,
+	}
+	presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+		return
+	}
+	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+		logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err)
+		return
+	}
+
+	function = "PresignedPutObject(bucketName, objectName, expires)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": "",
+		"expires":    3600 * time.Second,
+	}
+	_, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
+		return
+	}
+
+	function = "PresignedPutObject(bucketName, objectName, expires)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName + "-presigned",
+		"expires":    3600 * time.Second,
+	}
+	presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+		return
+	}
+
+	buf =
bytes.Repeat([]byte("g"), 1<<19) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err != nil { + logError(testName, function, args, startTime, "", "Presigned failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + req.Header.Add("mysecret", "abcxxx") + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) + return + } + + function = "RemoveObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + args["objectName"] = objectName + "-f" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) + + if err != nil { + 
logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-nolength" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presigned" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presign-custom" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + function = "RemoveBucket(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + err = c.RemoveBucket(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + err = c.RemoveBucket(context.Background(), bucketName) + if err == nil { + logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return + } + if err.Error() != "The specified bucket does not exist" { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectModified() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. 
+ objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+
+	var tempfile *os.File
+
+	if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+		tempfile, err = os.Open(fileName)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "File open failed", err)
+			return
+		}
+		args["fileToUpload"] = fileName
+	} else {
+		tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile create failed", err)
+			return
+		}
+		args["fileToUpload"] = tempfile.Name()
+
+		// Generate 100kB data
+		if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+			logError(testName, function, args, startTime, "", "File copy failed", err)
+			return
+		}
+
+		defer os.Remove(tempfile.Name())
+
+		// Seek back to the beginning of the file.
+		tempfile.Seek(0, 0)
+	}
+	length := 100 * humanize.KiByte
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	offset := length / 2
+	if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+		logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	tempfile.Close()
+
+	obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer obj.Close()
+
+	n, err := obj.Seek(int64(offset), 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != int64(offset) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(length-offset) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size of uploaded object, expected %d got %d", int64(length-offset), st.Size), err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
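+	// Editor's note (illustrative): the *V2 tests below differ from their
+	// earlier counterparts only in the credentials provider;
+	// credentials.NewStaticV2 exercises legacy AWS Signature Version 2
+	// signing instead of the Signature Version 4 used elsewhere in this file.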
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject's implicit contentType handling.
+func testFPutObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if n != int64(11*1024*1024) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
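+ // Note: on Windows an open handle would block the os.Rename and
+ // os.Remove calls made below, and FPutObject re-opens the file by name
+ // anyway, so the handle is released before uploading.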
+ err = file.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+ args["fileName"] = file.Name()
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(fileName, fileName+".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Rename failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-GTar"
+ args["contentType"] = ""
+ args["fileName"] = fileName + ".gtar"
+
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Check headers and sizes
+ rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if rStandard.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+ return
+ }
+
+ rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+ return
+ }
+
+ if rOctet.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+
+ rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.Size != 11*1024*1024 {
+ logError(testName, function, args, startTime, "", "Unexpected size", nil)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected one of application/x-gtar, application/x-tar or application/octet-stream , got "+rGTar.ContentType, err)
+ return
+ }
+
+ os.Remove(fileName + ".gtar")
+
successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests various supported bucket name formats.
+func testMakeBucketRegionsV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
+ args["bucketName"] = bucketName + ".withperiod"
+ args["region"] = "us-west-2"
+ logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReadSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data.
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+ n, err := r.Seek(offset, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+ return
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != offset {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+ return
+ }
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+ return
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != st.Size-offset {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", st.Size-offset, n), err)
+ return
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ // Seek again and read again.
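+ // Note: seeking to offset-1 leaves st.Size-offset+1 readable bytes,
+ // fewer than the st.Size requested from io.CopyN below, so the copy is
+ // expected to stop at io.EOF; that is the only error tolerated here.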
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != (offset - 1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset-1, n), err)
+ return
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ }
+ // Verify the remaining bytes match the tail of the original buffer.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+
+ offset := int64(2048)
+
+ // Read directly
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ m, err := r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, bufSize)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, bufSize+1)
+ // Read the whole object and beyond.
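+ // Note: buf6 is one byte longer than the object, so by the io.ReaderAt
+ // contract this read must come up short and report an error, io.EOF
+ // here; the EOF is therefore tolerated below.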
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + args["source"] = src + + // Set copy conditions. 
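+ // Note: MatchModifiedSince and MatchETag on the source translate into
+ // the x-amz-copy-source-if-modified-since and x-amz-copy-source-if-match
+ // conditional headers, so this copy only proceeds while both conditions
+ // hold for the source object.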
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+ args["destination"] = dst
+
+ // Perform the Copy
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ // Close all the readers.
+ r.Close()
+ readerCopy.Close()
+
+ // CopyObject again but with wrong conditions
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ NoMatchETag: objInfo.ETag,
+ }
+
+ // Perform the Copy which should fail
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]minio.CopySrcOptions{}
+ srcSlice := srcArr[:]
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "object",
+ }
+
+ args["destination"] = dst
+ // Describe srcArr in args["sourceList"] instead of embedding it,
+ // to avoid logging 10,001 empty entries.
+ args["sourceList"] = "source array of 10,001 elements"
+ if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
+ logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+ return
+ } else if err.Error() != "There must be as least one and up to 10000 source objects." {
+ logError(testName, function, args, startTime, "", "Got unexpected error", err)
+ return
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
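+ // The invalid range in step 2 below is deliberately off by one: valid
+ // offsets run from 0 through badSrcSize-1, so End: badSrcSize points
+ // one byte past the end of the object and trips the
+ // "invalid segment-to-copy" check.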
+ const badSrcSize = 5 * 1024 * 1024
+ buf := bytes.Repeat([]byte("1"), badSrcSize)
+ _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ // 2. Set invalid range spec on the object (going beyond
+ // object size)
+ badSrc := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "badObject",
+ MatchRange: true,
+ Start: 1,
+ End: badSrcSize,
+ }
+
+ // 3. ComposeObject call should fail.
+ if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil {
+ logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
+ return
+ } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+ logError(testName, function, args, startTime, "", "Got invalid error", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCasesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+func testComposeMultipleSources(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{
+ "destination": "",
+ "sourceList": "",
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload a small source object
+ const srcSize = 1024 * 1024 * 5
+ buf := bytes.Repeat([]byte("1"), srcSize)
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // We will append 10 copies of the object.
+ srcs := []minio.CopySrcOptions{}
+ for i := 0; i < 10; i++ {
+ srcs = append(srcs, minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ })
+ }
+
+ // Make the last part very small: with MatchRange set and Start/End left
+ // at zero, only the byte range 0-0 (a single byte) of the tenth source
+ // is copied, which is why the composed size checked below is 9*srcSize+1.
+ srcs[9].MatchRange = true
+
+ args["sourceList"] = srcs
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject",
+ }
+ args["destination"] = dst
+
+ ui, err := c.ComposeObject(context.Background(), dst, srcs...)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ if ui.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
+ return
+ }
+
+ objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objProps.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test concatenating multiple source objects with the V2 signature client.
+func testCompose10KSourcesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeMultipleSources(c)
+}
+
+func testEncryptedEmptyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 0
+ var buf []byte // Empty buffer
+ args["objectName"] = "object"
+ _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // 2. Test CopyObject for an empty object
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "object",
+ Encryption: sse,
+ }
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: sse,
+ }
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ function = "CopyObject(dst, src)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // 3.
Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + + delete(args, "objectName") + successLogger(testName, function, args, startTime).Info() +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncNameLoc(2) + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. copy object and change encryption key + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. 
get copied object and check if content is equal
+ coreClient := minio.Core{Client: c}
+ reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test key rotation for source object in-place.
+ var newSSE encrypt.ServerSide
+ if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
+ newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
+ }
+ if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
+ newSSE = encrypt.NewSSE()
+ }
+ if newSSE != nil {
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["destination"] = dst
+
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Get copied object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ decBytes, err = ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+ reader.Close()
+
+ // Test in-place decryption.
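+ // Note: copying with the source key set but no destination encryption
+ // re-stores the object unencrypted, which is why the final GetObject
+ // below succeeds without any SSE options.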
+ dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + } + args["destination"] = dst + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["source"] = src + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) + return + } + } + + // Get copied decrypted object and check if content is equal + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test encrypted copy object +func testUnencryptedToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc encrypt.ServerSide + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc, sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +func testDecryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ + ServerSideEncryption: encryption, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + Encryption: encrypt.SSECopy(encryption), + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "decrypted-" + objectName, + } + args["destination"] = dst + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testSSECMultipartEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. 
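+ // Note: this test drives the low-level Core API (NewMultipartUpload,
+ // PutObjectPart, CopyObjectPart, CompleteMultipartUpload) rather than
+ // the high-level client, so part boundaries and copy ranges are under
+ // the test's direct control.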
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
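+ // (More precisely: two copies of the source object's data, not its
+ // name. In the CopyObjectPart calls below a length of -1 copies the
+ // whole source object and the (0, 1) range copies just its first byte,
+ // while the SSECopy/Marshal calls attach the SSE-C copy-source and
+ // destination key headers that each part copy needs.)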
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 6*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + return + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + return + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation +func testSSECEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
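The read-back checks above lean on the two forms of `GetObjectOptions.SetRange`: with `end > 0` the request covers the inclusive byte range `[start, end]`, while `end == 0` with `start > 0` requests everything from `start` to the end of the object. A small sketch, with sizes mirroring the test:

```go
package example

import "github.com/minio/minio-go/v7"

// rangeOpts shows the two SetRange forms used by the verification code.
func rangeOpts() (first, rest minio.GetObjectOptions) {
	// end > 0: inclusive range [start, end], here the first 5 MiB.
	_ = first.SetRange(0, 5*1024*1024-1)
	// end == 0 with start > 0: open-ended range to the end of the object.
	_ = rest.SetRange(5*1024*1024, 0)
	return first, rest
}
```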
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy +func testSSECEncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + var dstencryption encrypt.ServerSide + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
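Each copy in these tests also pins the source with the `x-amz-copy-source-if-match` header, set from the source's ETag a few lines up, so the server fails the copy with 412 Precondition Failed instead of silently copying an object that was overwritten in the meantime. A sketch of that guard in isolation (names illustrative); for the high-level, non-Core API, `CopySrcOptions.MatchETag` expresses the same condition:

```go
package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// copyPinned copies the source as one part only if it still carries the
// given ETag; otherwise the server rejects the copy with 412.
func copyPinned(c minio.Core, bucket, src, dst, uploadID, etag string) (minio.CompletePart, error) {
	metadata := map[string]string{
		"x-amz-copy-source-if-match": etag,
	}
	return c.CopyObjectPart(context.Background(), bucket, src, bucket, dst,
		uploadID, 1, 0, -1, metadata)
}
```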
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
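Three `encrypt.ServerSide` flavours appear across this suite: `DefaultPBKDF` derives an SSE-C key from a password and salt (salted with bucket+object above, so each object gets a distinct key), `NewSSE` requests SSE-S3 where the server manages the key, and a raw 256-bit SSE-C key can be wrapped with `NewSSEC`. A quick side-by-side sketch; the password and zero-filled key are placeholders:

```go
package example

import "github.com/minio/minio-go/v7/pkg/encrypt"

func sseFlavours() ([]encrypt.ServerSide, error) {
	// SSE-C key derived from a password, salted per bucket/object so
	// every object ends up with a distinct key (the pattern used above).
	pbkdf := encrypt.DefaultPBKDF([]byte("password"), []byte("bucket"+"object"))

	// SSE-C from a raw 32-byte key.
	ssec, err := encrypt.NewSSEC(make([]byte, 32))
	if err != nil {
		return nil, err
	}

	// SSE-S3: the server generates and manages the key.
	sses3 := encrypt.NewSSE()

	return []encrypt.ServerSide{pbkdf, ssec, sses3}, nil
}
```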
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
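One caveat these tests can gloss over: every error path between `NewMultipartUpload` and `CompleteMultipartUpload` returns without aborting, leaving a dangling upload whose parts occupy space on the server (here `cleanupBucket` eventually sweeps them up). Outside of tests, something like the following sketch, with an illustrative helper name, keeps failed uploads from leaking:

```go
package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// withMultipart runs fn inside a multipart upload and aborts the upload
// if fn fails, so failed uploads do not leave parts behind.
func withMultipart(c minio.Core, bucket, object string, fn func(uploadID string) ([]minio.CompletePart, error)) error {
	uploadID, err := c.NewMultipartUpload(context.Background(), bucket, object, minio.PutObjectOptions{})
	if err != nil {
		return err
	}
	parts, err := fn(uploadID)
	if err != nil {
		// Best-effort cleanup of the dangling upload.
		_ = c.AbortMultipartUpload(context.Background(), bucket, object, uploadID)
		return err
	}
	_, err = c.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts, minio.PutObjectOptions{})
	return err
}
```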
+} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
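The three-part layout is not arbitrary: S3-compatible servers require every part except the last to be at least 5 MiB. That is why the source buffer is exactly 5 MiB, parts 1 and 2 each copy the whole source, and the single extra byte is copied as the final part. The size assertion above falls straight out of that arithmetic:

```go
package example

const (
	minPartSize = 5 * 1024 * 1024 // minimum for every part except the last
	srcSize     = 5 * 1024 * 1024 // the whole source object, copied twice
)

// expectedSize is the destination size the tests assert: 2*5MiB + 1 byte.
const expectedSize = 2*srcSize + 1 // 10485761
```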
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
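A detail worth noting in these read-backs: an SSE-C object is opaque without its key, so every `StatObject` and `GetObject` on the destination passes the same `dstencryption` used for the upload; omitting it produces a server error rather than ciphertext. A sketch of the read side, using the high-level client:

```go
package example

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

// readSSEC reads back an SSE-C object; the same customer key must be
// supplied on every GET or HEAD, or the server rejects the request.
func readSSEC(c *minio.Client, bucket, object string, key encrypt.ServerSide) ([]byte, error) {
	obj, err := c.GetObject(context.Background(), bucket, object,
		minio.GetObjectOptions{ServerSideEncryption: key})
	if err != nil {
		return nil, err
	}
	defer obj.Close()
	return io.ReadAll(obj)
}
```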
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy part
+func testSSES3EncryptedToUnencryptedCopyPart() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObjectPart(destination, source)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	client, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+		return
+	}
+
+	// Instantiate new core client object.
+	c := minio.Core{client}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, client)
+	// Make a buffer with 5MB of data
+	buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	srcEncryption := encrypt.NewSSE()
+	opts := minio.PutObjectOptions{
+		UserMetadata: map[string]string{
+			"Content-Type": "binary/octet-stream",
+		},
+		ServerSideEncryption: srcEncryption,
+	}
+	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject call failed", err)
+		return
+	}
+
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+		return
+	}
+
+	destBucketName := bucketName
+	destObjectName := objectName + "-dest"
+
+	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+		return
+	}
+
+	// Content of the destination object will be two copies of the
+	// source object's data concatenated, followed by the first byte
+	// of the source data.
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
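Note the asymmetry with the SSE-C suites above: here the header stays empty apart from the if-match guard, because the server holds the key for an SSE-S3 encrypted source and decrypts it itself. Only SSE-C sources need the `x-amz-copy-source-server-side-encryption-customer-*` headers produced by `encrypt.SSECopy`. A sketch of that distinction (helper name illustrative):

```go
package example

import (
	"net/http"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

// copySourceHeaders returns the copy-source headers for a part copy:
// only SSE-C sources need any; SSE-S3 and plain sources need none,
// since the server can decrypt those without client-held keys.
func copySourceHeaders(ssecKey encrypt.ServerSide) http.Header {
	h := make(http.Header)
	if ssecKey != nil {
		encrypt.SSECopy(ssecKey).Marshal(h)
	}
	return h
}
```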
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy part
+func testSSES3EncryptedToSSES3CopyObjectPart() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObjectPart(destination, source)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	client, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+		return
+	}
+
+	// Instantiate new core client object.
+	c := minio.Core{client}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, client)
+	// Make a buffer with 5MB of data
+	buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	srcEncryption := encrypt.NewSSE()
+	opts := minio.PutObjectOptions{
+		UserMetadata: map[string]string{
+			"Content-Type": "binary/octet-stream",
+		},
+		ServerSideEncryption: srcEncryption,
+	}
+
+	uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject call failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+		return
+	}
+
+	destBucketName := bucketName
+	destObjectName := objectName + "-dest"
+	dstencryption := encrypt.NewSSE()
+
+	uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+		return
+	}
+
+	// Content of the destination object will be two copies of the
+	// source object's data concatenated, followed by the first byte
+	// of the source data.
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +func testUserMetadataCopying() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testUserMetadataCopyingWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + h.Add(k, vs[0]) + } + } + return h + } + + // 1. create a client encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(http.Header) + metadata.Set("x-amz-meta-myheader", "myvalue") + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(context.Background(), bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) + return + } + if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 2. create source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + } + + // 2.1 create destination with metadata set + dst1 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-1", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + // 3. Check that copying to an object with metadata set resets + // the headers on the copy. + args["source"] = src + args["destination"] = dst1 + _, err = c.CopyObject(context.Background(), dst1, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders := make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 4. create destination with no metadata set and same source + dst2 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-2", + } + + // 5. 
Check that copying to an object with no metadata set, + // copies metadata. + args["source"] = src + args["destination"] = dst2 + _, err = c.CopyObject(context.Background(), dst2, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders = metadata + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 6. Compose a pair of sources. + dst3 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-3", + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst3 + _, err = c.ComposeObject(context.Background(), dst3, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 7. Compose a pair of sources with dest user metadata set. + dst4 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-4", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst4 + _, err = c.ComposeObject(context.Background(), dst4, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + expectedHeaders = make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testUserMetadataCopyingV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testStorageClassMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). 
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	fetchMeta := func(object string) (h http.Header) {
+		objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+		h = make(http.Header)
+		for k, vs := range objInfo.Metadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+				for _, v := range vs {
+					h.Add(k, v)
+				}
+			}
+		}
+		return h
+	}
+
+	metadata := make(http.Header)
+	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+	emptyMetadata := make(http.Header)
+
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+	_, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Get the returned metadata
+	returnedMeta := fetchMeta("srcObjectRRSClass")
+
+	// The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+	if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	metadata = make(http.Header)
+	metadata.Set("x-amz-storage-class", "STANDARD")
+
+	_, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
+		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
+		logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassInvalidMetadataPutObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testStorageClassInvalidMetadataPutObject()"
+	args := map[string]interface{}{}
+	testName := getFuncName()
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+		return
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	// Make a new bucket in 'us-east-1' (source bucket).
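+	// Note: the server is expected to validate x-amz-storage-class at
+	// request time, so the PutObject with an invalid class further below
+	// should be rejected outright rather than stored.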
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+	_, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassMetadataCopyObject() {
+	// initialize logging params
+	startTime := time.Now()
+	function := "testStorageClassMetadataCopyObject()"
+	args := map[string]interface{}{}
+	testName := getFuncName()
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+		return
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	fetchMeta := func(object string) (h http.Header) {
+		objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+		args["bucket"] = bucketName
+		args["object"] = object
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+		h = make(http.Header)
+		for k, vs := range objInfo.Metadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+				for _, v := range vs {
+					h.Add(k, v)
+				}
+			}
+		}
+		return h
+	}
+
+	metadata := make(http.Header)
+	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+	emptyMetadata := make(http.Header)
+
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize)
+
+	// Put an object with RRS Storage class
+	_, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Make server side copy of object uploaded in previous step
+	src := minio.CopySrcOptions{
+		Bucket: bucketName,
+		Object: "srcObjectRRSClass",
+	}
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName,
+		Object: "srcObjectRRSClassCopy",
+	}
+	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
+		return
+	}
+
+	// Get the returned metadata
+	returnedMeta := fetchMeta("srcObjectRRSClassCopy")
+
+	// The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+	if !reflect.DeepEqual(metadata, returnedMeta) &&
!reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + return + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. 
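+	// Note: a size of -1 tells the client the stream length is unknown,
+	// which (assuming minio-go v7 semantics) switches PutObject to a
+	// multipart streaming upload; the final size is only known at completion.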
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, st.Size), err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Issues are revealed by trying to upload multiple files of unknown size
+	// sequentially (on 4GB machines)
+	for i := 1; i <= 4; i++ {
+		// Simulate that we could be receiving byte slices of data that we want
+		// to upload as a file
+		rpipe, wpipe := io.Pipe()
+		defer rpipe.Close()
+		go func() {
+			b := []byte("test")
+			wpipe.Write(b)
+			wpipe.Close()
+		}()
+
+		// Upload the object.
+		objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+		args["objectName"] = objectName
+
+		ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+			return
+		}
+
+		if ui.Size != 4 {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", ui.Size), nil)
+			return
+		}
+
+		st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
+			return
+		}
+
+		if st.Size != int64(4) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4 got %d", st.Size), err)
+			return
+		}
+
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test put object with 0 byte object.
+func testPutObject0ByteV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       0,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	objectName := bucketName + "unique"
+	args["objectName"] = objectName
+	args["opts"] = minio.PutObjectOptions{}
+
+	// Upload an object.
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
+		return
+	}
+	if st.Size != 0 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating multiple 10K objects V4
+func testCompose10KSources() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	testComposeMultipleSources(c)
+}
+
+// Tests comprehensive list of all methods.
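+// Note: the client below is created with Signature V2 credentials, so every
+// request in this test, including the presigned URLs it mints, exercises the
+// V2 signing path.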
+func testFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "testFunctionalV2()"
+	functionAll := ""
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	location := "us-east-1"
+	// Make a new bucket.
+	function = "MakeBucket(bucketName, location)"
+	functionAll = "MakeBucket(bucketName, location)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"location":   location,
+	}
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate a random file name.
+	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	file, err := os.Create(fileName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "file create failed", err)
+		return
+	}
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		_, err = file.Write(buf)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file write failed", err)
+			return
+		}
+	}
+	file.Close()
+
+	// Verify if bucket exists and you have access.
+	var exists bool
+	function = "BucketExists(bucketName)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+	}
+	exists, err = c.BucketExists(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "BucketExists failed", err)
+		return
+	}
+	if !exists {
+		logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+		return
+	}
+
+	// Make the bucket 'public read/write'.
+	function = "SetBucketPolicy(bucketName, bucketPolicy)"
+	functionAll += ", " + function
+
+	readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
+
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"bucketPolicy": readWritePolicy,
+	}
+	err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+		return
+	}
+
+	// List all buckets.
+	function = "ListBuckets()"
+	functionAll += ", " + function
+	args = nil
+	buckets, err := c.ListBuckets(context.Background())
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+		return
+	}
+	if len(buckets) == 0 {
+		logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+		return
+	}
+
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If bucket not found, error out.
+	if !bucketFound {
+		logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
+		return
+	}
+
+	objectName := bucketName + "unique"
+
+	// Generate data
+	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"contentType": "",
+	}
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
+		return
+	}
+
+	objectNameNoLength := objectName + "-nolength"
+	args["objectName"] = objectNameNoLength
+	_, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected uploaded object length %d got %d", len(buf), st.Size), err)
+		return
+	}
+
+	// Instantiate a done channel to close all listing.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+
+	objFound := false
+	isRecursive := true // Recursive is true.
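+	// Note: the listing below sets UseV1 to force the legacy ListObjects
+	// (v1) API; without that flag minio-go defaults to ListObjectsV2.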
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + + // Verify if presigned url works. 
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedHeadObject URL returns status %d", resp.StatusCode), err)
+		return
+	}
+	if resp.Header.Get("ETag") == "" {
+		logError(testName, function, args, startTime, "", "Got empty ETag", err)
+		return
+	}
+	resp.Body.Close()
+
+	// Generate presigned GET object url.
+	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+	presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	// Generate presigned GET object url.
+	args["reqParams"] = reqParams
+	presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+	// Verify content disposition.
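+	// Note: S3-compatible servers echo the response-content-disposition
+	// query parameter back as the Content-Disposition response header,
+	// which is what the check below relies on.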
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err == nil { + logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test get object with GetObject with context +func testGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with a user provided context
+func testFGetObjectContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(testName, function, args, startTime, "", "Remove file failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object with GetObject with a user provided context +func testGetObjectRanges() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + rng := rand.NewSource(time.Now().UnixNano()) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rng, "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rng, "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + tests := []struct { + start int64 + end int64 + }{ + { + start: 1024, + end: 1024 + 1<<20, + }, + { + start: 20e6, + end: 20e6 + 10000, + }, + { + start: 40e6, + end: 40e6 + 10000, + }, + { + start: 60e6, + end: 60e6 + 10000, + }, + { + start: 80e6, + end: 80e6 + 10000, + }, + { + start: 120e6, + end: int64(bufSize), + }, + } + for _, test := range tests { + wantRC := getDataReader("datafile-129-MB") + io.CopyN(ioutil.Discard, wantRC, test.start) + want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) + opts := minio.GetObjectOptions{} + opts.SetRange(test.start, test.end) + args["opts"] = fmt.Sprintf("%+v", test) + obj, err := c.GetObject(ctx, bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + err = crcMatches(obj, want) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object ACLs with GetObjectACL with custom provided context +func testGetObjectACLContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObjectACL(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData := map[string]string{ + "X-Amz-Acl": "public-read-write", + } + + _, err = c.PutObject(context.Background(), bucketName, + objectName, reader, int64(bufSize), + minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + UserMetadata: metaData, + }) + + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + + // Read the data back + objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName) + if getObjectACLErr != nil { + logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr) + return + } + + s, ok := objectInfo.Metadata["X-Amz-Acl"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + // Do a very limited testing if this is not AWS S3 + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + if s[0] != "private" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got"+fmt.Sprintf("%q", s[0]), nil) + return + } + + successLogger(testName, function, args, startTime).Info() + return + } + + if s[0] != "public-read-write" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) + return + } + + bufSize = dataFileMap["datafile-1-MB"] + reader2 := getDataReader("datafile-1-MB") + defer reader2.Close() + // Save the data + objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData = map[string]string{ + "X-Amz-Grant-Read": "id=fooread@minio.go", + "X-Amz-Grant-Write": "id=foowrite@minio.go", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + + // Read the data back + objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName) + if getObjectACLErr == nil { + logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) + return + } + + if len(objectInfo.Metadata) != 3 { + logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] + if !ok { + logError(testName, function, args, startTime, "", 
"GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "fooread@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	args["ctx"] = ctx
+	args["size"] = bufSize
+	defer cancel()
+
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+
+	defer cancel()
+	reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with GetObject with custom context
+func testGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
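+	// Note: GetObject defers the actual HTTP request until the first
+	// Read/Stat on the returned object, which is why the canceled context
+	// below surfaces as an error at r.Stat() rather than at GetObject itself.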
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with custom context
+func testFGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
+		return
+	}
+
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test list objects V1 and V2
+func testListObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	testObjects := []struct {
+		name         string
+		storageClass string
+	}{
+		// Special characters
+		{"foo bar", "STANDARD"},
+		{"foo-%", "STANDARD"},
+		{"random-object-1", "STANDARD"},
+		{"random-object-2", "REDUCED_REDUNDANCY"},
+	}
+
+	for i, object := range testObjects {
+		bufSize := dataFileMap["datafile-33-kB"]
+		reader := getDataReader("datafile-33-kB")
+		defer reader.Close()
+		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
+			minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
+			return
+		}
+	}
+
+	testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
+		var objCursor int
+
+		// check for object name and storage-class from listing object result
+		for objInfo := range listFn(context.Background(), bucket, opts) {
+			if objInfo.Err != nil {
+				logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+				return
+			}
+			if objInfo.Key != testObjects[objCursor].name {
+				logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err)
+				return
+			}
+			if objInfo.StorageClass != testObjects[objCursor].storageClass {
+				// Ignored as Gateways (Azure/GCS etc) won't return storage class
+				ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
+			}
+			objCursor++
+		}
+
+		if objCursor != len(testObjects) {
+			logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New(""))
+			return
+		}
+	}
+
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test deleting multiple objects with object retention set in Governance mode
+func testRemoveObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh, opts)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	// Replace with smaller...
+	bufSize = dataFileMap["datafile-10-kB"]
+	reader = getDataReader("datafile-10-kB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+	m := minio.RetentionMode(minio.Governance)
+	opts := minio.PutObjectRetentionOptions{
+		GovernanceBypass: false,
+		RetainUntilDate:  &t,
+		Mode:             &m,
+	}
+	err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error setting retention", err)
+		return
+	}
+
+	objectsCh := make(chan minio.ObjectInfo)
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh)
+		// List all objects from a bucket-name with a matching prefix.
+		for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+			if object.Err != nil {
+				logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
+				return
+			}
+			objectsCh <- object
+		}
+	}()
+
+	for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+		// Error is expected here because Retention is set on the object
+		// and RemoveObjects is called without Bypass Governance
+		if rErr.Err == nil {
+			logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+			return
+		}
+	}
+
+	objectsCh1 := make(chan minio.ObjectInfo)
+
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh1)
+		// List all objects from a bucket-name with a matching prefix.
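+		// Note: RemoveObjects drains objectsCh1 until it is closed, so the
+		// deferred close above is what lets the removal loop below terminate.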
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return + } + objectsCh1 <- object + } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + // Output to stdout instead of the default stderr + log.SetOutput(os.Stdout) + // create custom formatter + mintFormatter := mintJSONFormatter{} + // set custom formatter + log.SetFormatter(&mintFormatter) + // log Info or above -- success cases are Info level, failures are Fatal level + log.SetLevel(log.InfoLevel) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kms := mustParseBool(os.Getenv(enableKMS)) + if os.Getenv(enableKMS) == "" { + // Default to KMS tests. + kms = true + } + + // execute tests + if isFullMode() { + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testGetObjectRanges() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() + testPutObjectWithVersioning() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testGetObjectS3Zip() + testRemoveMultipleObjects() + testRemoveMultipleObjectsWithResult() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + testRemoveObjects() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testConcurrentCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + 
testObjectTaggingWithVersioning() + + // SSE-C tests will only work over TLS connection. + if tls { + testSSECEncryptionPutGet() + testSSECEncryptionFPut() + testSSECEncryptedGetObjectReadAtFunctional() + testSSECEncryptedGetObjectReadSeekFunctional() + testEncryptedCopyObjectV2() + testEncryptedSSECToSSECCopyObject() + testEncryptedSSECToUnencryptedCopyObject() + testUnencryptedToSSECCopyObject() + testUnencryptedToUnencryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() + testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() + testSSECEncryptedToUnencryptedCopyPart() + testUnencryptedToSSECCopyObjectPart() + testUnencryptedToUnencryptedCopyPart() + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + } + + // KMS tests + if kms { + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() + } + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go new file mode 100644 index 00000000..f251c1e9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go @@ -0,0 +1,85 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "io" +) + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. Second reader is appropriately +// notified about the exact number of bytes read from the primary +// source on each Read operation. +type hookReader struct { + source io.Reader + hook io.Reader +} + +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. +func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + n, err = sourceSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + } + + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + var m int64 + m, err = hookSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + if n != m { + return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) + } + } + return n, nil +} + +// Read implements io.Reader. Always reads from the source, the return +// value 'n' number of bytes are reported through the hook. 
Returns
+// error for all non io.EOF conditions.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+	n, err = hr.source.Read(b)
+	if err != nil && err != io.EOF {
+		return n, err
+	}
+	// Progress the hook with the total read bytes from the source.
+	if _, herr := hr.hook.Read(b[:n]); herr != nil {
+		if herr != io.EOF {
+			return n, herr
+		}
+	}
+	return n, err
+}
+
+// newHook returns an io.Reader which implements hookReader that
+// reports the data read from the source to the hook.
+func newHook(source, hook io.Reader) io.Reader {
+	if hook == nil {
+		return source
+	}
+	return &hookReader{source, hook}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 00000000..107a11b1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,230 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/xml"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/signer"
+	sha256 "github.com/minio/sha256-simd"
+)
+
+// AssumeRoleResponse contains the result of successful AssumeRole request.
+type AssumeRoleResponse struct {
+	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
+
+	Result           AssumeRoleResult `xml:"AssumeRoleResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// AssumeRoleResult - Contains the response to a successful AssumeRole
+// request, including temporary credentials that can be used to make
+// MinIO API requests.
+type AssumeRoleResult struct {
+	// The identifiers for the temporary security credentials that the operation
+	// returns.
+	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
+	Credentials struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize int `xml:",omitempty"` +} + +// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSAssumeRole struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // STS endpoint to fetch STS credentials. + STSEndpoint string + + // various options for this request. + Options STSAssumeRoleOptions +} + +// STSAssumeRoleOptions collection of various input options +// to obtain AssumeRole credentials. +type STSAssumeRoleOptions struct { + // Mandatory inputs. + AccessKey string + SecretKey string + + Location string // Optional commonly needed with AWS STS. + DurationSeconds int // Optional defaults to 1 hour. + + // Optional only valid if using with AWS STS + RoleARN string + RoleSessionName string +} + +// NewSTSAssumeRole returns a pointer to a new +// Credentials object wrapping the STSAssumeRole. +func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if opts.AccessKey == "" || opts.SecretKey == "" { + return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") + } + return New(&STSAssumeRole{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + Options: opts, + }), nil +} + +const defaultDurationSeconds = 3600 + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. 
+ // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { + v := url.Values{} + v.Set("Action", "AssumeRole") + v.Set("Version", STSVersion) + if opts.RoleARN != "" { + v.Set("RoleArn", opts.RoleARN) + } + if opts.RoleSessionName != "" { + v.Set("RoleSessionName", opts.RoleSessionName) + } + if opts.DurationSeconds > defaultDurationSeconds { + v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) + } else { + v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) + } + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleResponse{}, err + } + u.Path = "/" + + postBody := strings.NewReader(v.Encode()) + hash := sha256.New() + if _, err = io.Copy(hash, postBody); err != nil { + return AssumeRoleResponse{}, err + } + postBody.Seek(0, 0) + + req, err := http.NewRequest(http.MethodPost, u.String(), postBody) + if err != nil { + return AssumeRoleResponse{}, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) + req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleResponse{}, err + } + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return AssumeRoleResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleResponse{}, errResp + } + + a := AssumeRoleResponse{} + if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil { + return AssumeRoleResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSAssumeRole) Retrieve() (Value, error) { + a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go new file mode 100644 index 00000000..6dc8e9d0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -0,0 +1,89 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Chain will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The Chain provides a way of chaining multiple providers together +// which will pick the first available using priority order of the +// Providers in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the no credentials value. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again after IsExpired() is true. +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) +// +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } +// +type Chain struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return New(&Chain{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value, returns no credentials(anonymous) +// if no credentials provider returned any value. +// +// If a provider is found with credentials, it will be cached and any calls +// to IsExpired() will return the expired state of the cached provider. +func (c *Chain) Retrieve() (Value, error) { + for _, p := range c.Providers { + creds, _ := p.Retrieve() + // Always prioritize non-anonymous providers, if any. + if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { + continue + } + c.curr = p + return creds, nil + } + // At this point we have exhausted all the providers and + // are left without any credentials return anonymous. + return Value{ + SignerType: SignatureAnonymous, + }, nil +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample new file mode 100644 index 00000000..d793c9e0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.min.io", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go new file mode 100644 index 00000000..6b93a27f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -0,0 +1,192 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +const ( + // STSVersion sts version string + STSVersion = "2011-06-15" + + // How much duration to slash from the given expiration duration + defaultExpiryWindow = 0.8 +) + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + cut := window + if cut < 0 { + expireIn := expiration.Sub(e.CurrentTime()) + cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) + } + e.expiration = expiration.Add(-cut) +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. 
All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + if c == nil { + return Value{}, nil + } + + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample new file mode 100644 index 00000000..7fc91d9d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go new file mode 100644 index 00000000..0c94477b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. 
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := NewFromEnv()
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//	    // handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := NewFromIAM("")
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 00000000..b6e60d0e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+	retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+	return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
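+//
+// A minimal usage sketch (illustrative only; callers normally go through
+// Credentials.Get() rather than calling Retrieve directly):
+//
+//	creds := NewEnvAWS()
+//	v, err := creds.Get() // reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY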
+func (e *EnvAWS) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		signerType = SignatureAnonymous
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 00000000..5bfeab14
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+// * Access Key ID: MINIO_ROOT_USER.
+// * Secret Access Key: MINIO_ROOT_PASSWORD.
+type EnvMinio struct {
+	retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+	return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("MINIO_ROOT_USER")
+	secret := os.Getenv("MINIO_ROOT_PASSWORD")
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		id = os.Getenv("MINIO_ACCESS_KEY")
+		secret = os.Getenv("MINIO_SECRET_KEY")
+		if id == "" || secret == "" {
+			signerType = SignatureAnonymous
+		}
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 00000000..f4b027a4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,96 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" +) + +// ErrorResponse - Is the typed error returned. +// ErrorResponse struct should be comparable since it is compared inside +// golang http API (https://github.com/golang/go/issues/29768) +type ErrorResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"` + STSError struct { + Type string `xml:"Type"` + Code string `xml:"Code"` + Message string `xml:"Message"` + } `xml:"Error"` + RequestID string `xml:"RequestId"` +} + +// Error - Is the typed error returned by all API operations. +type Error struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + Resource string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string + + // Captures the server string returned in response header. + Server string + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// Error - Returns S3 error string. +func (e Error) Error() string { + if e.Message == "" { + return fmt.Sprintf("Error response code %s.", e.Code) + } + return e.Message +} + +// Error - Returns STS error string. +func (e ErrorResponse) Error() string { + if e.STSError.Message == "" { + return fmt.Sprintf("Error response code %s.", e.STSError.Code) + } + return e.STSError.Message +} + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// xmlDecodeAndBody reads the whole body up to 1MB and +// tries to XML decode it into v. +// The body that was read and any error from reading or decoding is returned. +func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go new file mode 100644 index 00000000..ccc8251f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,120 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"os"
+	"path/filepath"
+
+	homedir "github.com/mitchellh/go-homedir"
+	ini "gopkg.in/ini.v1"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows: "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+	return New(&FileAWSCredentials{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+	if p.Filename == "" {
+		p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+		if p.Filename == "" {
+			homeDir, err := homedir.Dir()
+			if err != nil {
+				return Value{}, err
+			}
+			p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+		}
+	}
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+		if p.Profile == "" {
+			p.Profile = "default"
+		}
+	}
+
+	p.retrieved = false
+
+	iniProfile, err := loadProfile(p.Filename, p.Profile)
+	if err != nil {
+		return Value{}, err
+	}
+
+	// Default to empty string if not found.
+	id := iniProfile.Key("aws_access_key_id")
+	// Default to empty string if not found.
+	secret := iniProfile.Key("aws_secret_access_key")
+	// Default to empty string if not found.
+	token := iniProfile.Key("aws_session_token")
+
+	p.retrieved = true
+	return Value{
+		AccessKeyID:     id.String(),
+		SecretAccessKey: secret.String(),
+		SessionToken:    token.String(),
+		SignerType:      SignatureV4,
+	}, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileAWSCredentials) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned or error. Error will be
+// returned if it fails to read from the file, or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+	config, err := ini.Load(filename)
+	if err != nil {
+		return nil, err
+	}
+	iniProfile, err := config.GetSection(profile)
+	if err != nil {
+		return nil, err
+	}
+	return iniProfile, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 00000000..dc3f3cc0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,135 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	jsoniter "github.com/json-iterator/go"
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.mc/config.json"
+	// Windows: "%USERALIAS%\mc\config.json"
+	Filename string
+
+	// MinIO Alias to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "MINIO_ALIAS" or "default" if
+	// environment variable is also not set.
+	Alias string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename string, alias string) *Credentials {
+	return New(&FileMinioClient{
+		Filename: filename,
+		Alias:    alias,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+	if p.Filename == "" {
+		if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
+			p.Filename = value
+		} else {
+			homeDir, err := homedir.Dir()
+			if err != nil {
+				return Value{}, err
+			}
+			p.Filename = filepath.Join(homeDir, ".mc", "config.json")
+			if runtime.GOOS == "windows" {
+				p.Filename = filepath.Join(homeDir, "mc", "config.json")
+			}
+		}
+	}
+
+	if p.Alias == "" {
+		p.Alias = os.Getenv("MINIO_ALIAS")
+		if p.Alias == "" {
+			p.Alias = "s3"
+		}
+	}
+
+	p.retrieved = false
+
+	hostCfg, err := loadAlias(p.Filename, p.Alias)
+	if err != nil {
+		return Value{}, err
+	}
+
+	p.retrieved = true
+	return Value{
+		AccessKeyID:     hostCfg.AccessKey,
+		SecretAccessKey: hostCfg.SecretKey,
+		SignerType:      parseSignatureType(hostCfg.API),
+	}, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+	return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+	URL       string `json:"url"`
+	AccessKey string `json:"accessKey"`
+	SecretKey string `json:"secretKey"`
+	API       string `json:"api"`
+}
+
+// config describes the config file's version and host entries.
+type config struct {
+	Version string                `json:"version"`
+	Hosts   map[string]hostConfig `json:"hosts"`
+}
+
+// loadAlias loads from the file pointed to by shared credentials filename for alias.
+// The credentials retrieved from the alias will be returned or error. Error will be
+// returned if it fails to read from the file.
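+//
+// For illustration only (the path and alias here are assumptions):
+//
+//	hostCfg, err := loadAlias("/home/user/.mc/config.json", "play")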
+func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + json := jsoniter.ConfigCompatibleWithStandardLibrary + + configBytes, err := ioutil.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go new file mode 100644 index 00000000..f7a4af4a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -0,0 +1,374 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "context" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + jsoniter "github.com/json-iterator/go" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring. This is beneficial +// so race conditions with expiring credentials do not cause +// request to fail unexpectedly due to ExpiredTokenException exceptions. +// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration. +// When used the tokens refresh will be triggered when 80% of the elapsed +// time until the actual expiration time is passed. +const DefaultExpiryWindow = -1 + +// A IAM retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +type IAM struct { + Expiry + + // Required http Client to use when connecting to IAM metadata service. + Client *http.Client + + // Custom endpoint to fetch IAM role credentials. + Endpoint string +} + +// IAM Roles for Amazon EC2 +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +const ( + defaultIAMRoleEndpoint = "http://169.254.169.254" + defaultECSRoleEndpoint = "http://169.254.170.2" + defaultSTSRoleEndpoint = "https://sts.amazonaws.com" + defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" + tokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + tokenPath = "/latest/api/token" + tokenTTL = "21600" + tokenRequestHeader = "X-aws-ec2-metadata-token" +) + +// NewIAM returns a pointer to a new Credentials object wrapping the IAM. +func NewIAM(endpoint string) *Credentials { + return New(&IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + Endpoint: endpoint, + }) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or if it is unable to
+// extract the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+	var roleCreds ec2RoleCredRespBody
+	var err error
+
+	endpoint := m.Endpoint
+	switch {
+	case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
+		if len(endpoint) == 0 {
+			if len(os.Getenv("AWS_REGION")) > 0 {
+				if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
+					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn"
+				} else {
+					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
+				}
+			} else {
+				endpoint = defaultSTSRoleEndpoint
+			}
+		}
+
+		creds := &STSWebIdentity{
+			Client:      m.Client,
+			STSEndpoint: endpoint,
+			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
+				token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+				if err != nil {
+					return nil, err
+				}
+
+				return &WebIdentityToken{Token: string(token)}, nil
+			},
+			RoleARN:         os.Getenv("AWS_ROLE_ARN"),
+			roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
+		}
+
+		stsWebIdentityCreds, err := creds.Retrieve()
+		if err == nil {
+			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
+		}
+		return stsWebIdentityCreds, err
+
+	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
+		if len(endpoint) == 0 {
+			endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
+				os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
+		}
+
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
+		if len(endpoint) == 0 {
+			endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+			var ok bool
+			if ok, err = isLoopback(endpoint); !ok {
+				if err == nil {
+					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
+				}
+				break
+			}
+		}
+
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+	default:
+		roleCreds, err = getCredentials(m.Client, endpoint)
+	}
+
+	if err != nil {
+		return Value{}, err
+	}
+	// DefaultExpiryWindow triggers a refresh once 80% of the
+	// credentials' lifetime has elapsed.
+	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+	return Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		SignerType:      SignatureV4,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+
+	// Unused params.
+	LastUpdated time.Time
+	Type        string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	u.Path = defaultIAMSecurityCredsPath
+	return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there
+// are no credentials, or if making or receiving the request fails.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + if token != "" { + req.Header.Add(tokenRequestHeader, token) + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest(http.MethodGet, endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if token != "" { + req.Header.Set("Authorization", token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + +func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil) + if err != nil { + return "", err + } + req.Header.Add(tokenRequestTTLHeader, tokenTTL) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", errors.New(resp.Status) + } + return string(data), nil +} + +// getCredentials - obtains the credentials from the IAM role name associated with +// the current EC2 service. +// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + if endpoint == "" { + endpoint = defaultIAMRoleEndpoint + } + + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + token, _ := fetchIMDSToken(client, endpoint) + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u, token) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. 
+ // + // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access + // + u.Path = path.Join(u.Path, roleName) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + if token != "" { + req.Header.Add(tokenRequestHeader, token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} + +// isLoopback identifies if a uri's host is on a loopback address +func isLoopback(uri string) (bool, error) { + u, err := url.Parse(uri) + if err != nil { + return false, err + } + + host := u.Hostname() + if len(host) == 0 { + return false, fmt.Errorf("can't parse host from uri: %s", uri) + } + + ips, err := net.LookupHost(host) + if err != nil { + return false, err + } + for _, ip := range ips { + if !net.ParseIP(ip).IsLoopback() { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go new file mode 100644 index 00000000..b7943330 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. 
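+//
+// For example (illustrative):
+//
+//	SignatureV4.String()        // "S3v4"
+//	SignatureDefault.String()   // "S3v4", since the default is v4
+//	SignatureV2.String()        // "S3v2"
+//	SignatureAnonymous.String() // "Anonymous"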
+func (s SignatureType) String() string { + if s.IsV2() { + return "S3v2" + } else if s.IsV4() { + return "S3v4" + } else if s.IsStreamingV4() { + return "S3v4Streaming" + } + return "Anonymous" +} + +func parseSignatureType(str string) SignatureType { + if strings.EqualFold(str, "S3v4") { + return SignatureV4 + } else if strings.EqualFold(str, "S3v2") { + return SignatureV2 + } else if strings.EqualFold(str, "S3v4Streaming") { + return SignatureV4Streaming + } + return SignatureAnonymous +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go new file mode 100644 index 00000000..7dde00b0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go @@ -0,0 +1,67 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Static is a set of credentials which are set programmatically, +// and will never expire. +type Static struct { + Value +} + +// NewStaticV2 returns a pointer to a new Credentials object +// wrapping a static credentials value provider, signature is +// set to v2. If access and secret are not specified then +// regardless of signature type set it Value will return +// as anonymous. +func NewStaticV2(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV2) +} + +// NewStaticV4 is similar to NewStaticV2 with similar considerations. +func NewStaticV4(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV4) +} + +// NewStatic returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { + return New(&Static{ + Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + SignerType: signerType, + }, + }) +} + +// Retrieve returns the static credentials. +func (s *Static) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + // Anonymous is not an error + return Value{SignerType: SignatureAnonymous}, nil + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For Static, the credentials never expired. +func (s *Static) IsExpired() bool { + return false +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go new file mode 100644 index 00000000..1f106ef7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -0,0 +1,179 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "time" +) + +// AssumedRoleUser - The identifiers for the temporary security credentials that +// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + Arn string + AssumedRoleID string `xml:"AssumeRoleId"` +} + +// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. +type AssumeRoleWithClientGrantsResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` + Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants +// request, including temporary credentials that can be used to make MinIO API requests. +type ClientGrantsResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromClientGrantsToken string `xml:",omitempty"` +} + +// ClientGrantsToken - client grants token with expiry. +type ClientGrantsToken struct { + Token string + Expiry int +} + +// A STSClientGrants retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSClientGrants struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // MinIO endpoint to fetch STS credentials. + STSEndpoint string + + // getClientGrantsTokenExpiry function to retrieve tokens + // from IDP This function should return two values one is + // accessToken which is a self contained access token (JWT) + // and second return value is the expiry associated with + // this token. This is a customer provided function and + // is mandatory. + GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error) +} + +// NewSTSClientGrants returns a pointer to a new +// Credentials object wrapping the STSClientGrants. 
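+//
+// A minimal usage sketch (the endpoint and token below are placeholders,
+// not defaults; the callback must return a client-grants JWT obtained from
+// your IDP):
+//
+//	creds, err := NewSTSClientGrants("https://minio.local:9000", func() (*ClientGrantsToken, error) {
+//		return &ClientGrantsToken{Token: "<jwt>", Expiry: 3600}, nil
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	v, err := creds.Get() // fetches and caches temporary credentials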
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getClientGrantsTokenExpiry == nil { + return nil, errors.New("Client grants access token and expiry retrieval function should be defined") + } + return New(&STSClientGrants{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, + }), nil +} + +func getClientGrantsCredentials(clnt *http.Client, endpoint string, + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error), +) (AssumeRoleWithClientGrantsResponse, error) { + accessToken, err := getClientGrantsTokenExpiry() + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithClientGrants") + v.Set("Token", accessToken.Token) + v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithClientGrantsResponse{}, errResp + } + + a := AssumeRoleWithClientGrantsResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 00000000..586995e8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -0,0 +1,204 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// AssumeRoleWithLDAPResponse contains the result of a successful
+// AssumeRoleWithLDAPIdentity request.
+type AssumeRoleWithLDAPResponse struct {
+	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
+	Result           LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// LDAPIdentityResult - contains credentials for a successful
+// AssumeRoleWithLDAPIdentity request.
+type LDAPIdentityResult struct {
+	Credentials struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+
+	SubjectFromToken string `xml:",omitempty"`
+}
+
+// LDAPIdentity retrieves credentials from MinIO
+type LDAPIdentity struct {
+	Expiry
+
+	// Required http Client to use when connecting to MinIO STS service.
+	Client *http.Client
+
+	// Exported STS endpoint to fetch STS credentials.
+	STSEndpoint string
+
+	// LDAP username/password used to fetch LDAP STS credentials.
+	LDAPUsername, LDAPPassword string
+
+	// Session policy to apply to the generated credentials. Leave empty to
+	// use the full access policy available to the user.
+	Policy string
+
+	// RequestedExpiry is the configured expiry duration for credentials
+	// requested from LDAP.
+	RequestedExpiry time.Duration
+}
+
+// NewLDAPIdentity returns a new credentials object that uses LDAP
+// Identity.
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+	l := LDAPIdentity{
+		Client:       &http.Client{Transport: http.DefaultTransport},
+		STSEndpoint:  stsEndpoint,
+		LDAPUsername: ldapUsername,
+		LDAPPassword: ldapPassword,
+	}
+	for _, optFunc := range optFuncs {
+		optFunc(&l)
+	}
+	return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+	return func(k *LDAPIdentity) {
+		k.Policy = policy
+	}
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
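+//
+// A minimal usage sketch (endpoint and LDAP credentials are placeholders):
+//
+//	creds, err := NewLDAPIdentity(
+//		"https://minio.local:9000",
+//		"ldap-user", "ldap-secret",
+//		LDAPIdentityExpiryOpt(30*time.Minute),
+//	)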
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.RequestedExpiry = d + } +} + +func stripPassword(err error) error { + urlErr, ok := err.(*url.Error) + if ok { + u, _ := url.Parse(urlErr.URL) + if u == nil { + return urlErr + } + values := u.Query() + values.Set("LDAPPassword", "xxxxx") + u.RawQuery = values.Encode() + urlErr.URL = u.String() + return urlErr + } + return err +} + +// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses +// LDAP Identity with a specified session policy. The `policy` parameter must be +// a JSON string specifying the policy document. +// +// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. +func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { + return New(&LDAPIdentity{ + Client: &http.Client{Transport: http.DefaultTransport}, + STSEndpoint: stsEndpoint, + LDAPUsername: ldapUsername, + LDAPPassword: ldapPassword, + Policy: policy, + }), nil +} + +// Retrieve gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) Retrieve() (value Value, err error) { + u, err := url.Parse(k.STSEndpoint) + if err != nil { + return value, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithLDAPIdentity") + v.Set("Version", STSVersion) + v.Set("LDAPUsername", k.LDAPUsername) + v.Set("LDAPPassword", k.LDAPPassword) + if k.Policy != "" { + v.Set("Policy", k.Policy) + } + if k.RequestedExpiry != 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return value, stripPassword(err) + } + + resp, err := k.Client.Do(req) + if err != nil { + return value, stripPassword(err) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return value, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return value, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return value, errResp + } + + r := AssumeRoleWithLDAPResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return + } + + cr := r.Result.Credentials + k.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go new file mode 100644 index 00000000..c7ac4db3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -0,0 +1,209 @@ +// MinIO Go Library for Amazon S3 Compatible Cloud Storage +// Copyright 2021 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/xml"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// CertificateIdentityOption is an optional AssumeRoleWithCertificate
+// parameter - e.g. a custom HTTP transport configuration or S3 credential
+// lifetime.
+type CertificateIdentityOption func(*STSCertificateIdentity)
+
+// CertificateIdentityWithTransport returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given http.RoundTripper.
+func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
+}
+
+// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given lifetime.
+//
+// Fetched S3 credentials will have the given lifetime if the STS server
+// allows such credentials.
+func CertificateIdentityWithExpiry(lifetime time.Duration) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = lifetime })
+}
+
+// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
+// rotates those credentials once they expire.
+type STSCertificateIdentity struct {
+	Expiry
+
+	// STSEndpoint is the base URL endpoint of the STS API.
+	// For example, https://minio.local:9000
+	STSEndpoint string
+
+	// S3CredentialLivetime is the duration temp. S3 access
+	// credentials should be valid.
+	//
+	// It represents the access credential lifetime requested
+	// by the client. The STS server may choose to issue
+	// temp. S3 credentials that have a different - usually
+	// shorter - lifetime.
+	//
+	// The default lifetime is one hour.
+	S3CredentialLivetime time.Duration
+
+	// Client is the HTTP client used to authenticate and fetch
+	// S3 credentials.
+	//
+	// A custom TLS client configuration can be specified by
+	// using a custom http.Transport:
+	//	Client: http.Client {
+	//		Transport: &http.Transport{
+	//			TLSClientConfig: &tls.Config{},
+	//		},
+	//	}
+	Client http.Client
+}
+
+var _ Provider = (*STSCertificateIdentity)(nil) // compiler check
+
+// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
+// to the given STS endpoint with the given TLS certificate and retrieves and
+// rotates S3 credentials.
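+//
+// A minimal usage sketch (file paths and endpoint are placeholders):
+//
+//	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
+//	if err != nil {
+//		// handle error
+//	}
+//	creds, err := NewSTSCertificateIdentity(
+//		"https://minio.local:9000",
+//		cert,
+//		CertificateIdentityWithExpiry(1*time.Hour),
+//	)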
+func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { + if endpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if _, err := url.Parse(endpoint); err != nil { + return nil, err + } + identity := &STSCertificateIdentity{ + STSEndpoint: endpoint, + Client: http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 5 * time.Second, + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{certificate}, + }, + }, + }, + } + for _, option := range options { + option(identity) + } + return New(identity), nil +} + +// Retrieve fetches a new set of S3 credentials from the configured +// STS API endpoint. +func (i *STSCertificateIdentity) Retrieve() (Value, error) { + endpointURL, err := url.Parse(i.STSEndpoint) + if err != nil { + return Value{}, err + } + livetime := i.S3CredentialLivetime + if livetime == 0 { + livetime = 1 * time.Hour + } + + queryValues := url.Values{} + queryValues.Set("Action", "AssumeRoleWithCertificate") + queryValues.Set("Version", STSVersion) + endpointURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) + if err != nil { + return Value{}, err + } + req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) + + resp, err := i.Client.Do(req) + if err != nil { + return Value{}, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return Value{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return Value{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return Value{}, errResp + } + + const MaxSize = 10 * 1 << 20 + var body io.Reader = resp.Body + if resp.ContentLength > 0 && resp.ContentLength < MaxSize { + body = io.LimitReader(body, resp.ContentLength) + } else { + body = io.LimitReader(body, MaxSize) + } + + var response assumeRoleWithCertificateResponse + if err = xml.NewDecoder(body).Decode(&response); err != nil { + return Value{}, err + } + i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: response.Result.Credentials.AccessKey, + SecretAccessKey: response.Result.Credentials.SecretKey, + SessionToken: response.Result.Credentials.SessionToken, + SignerType: SignatureDefault, + }, nil +} + +// Expiration returns the expiration time of the current S3 credentials. 
+func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } + +type assumeRoleWithCertificateResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` + Result struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:"Credentials" json:"credentials,omitempty"` + } `xml:"AssumeRoleWithCertificateResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go new file mode 100644 index 00000000..19bc3ddf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -0,0 +1,204 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. +type AssumeRoleWithWebIdentityResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` + Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity +// request, including temporary credentials that can be used to make MinIO API requests. +type WebIdentityResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromWebIdentityToken string `xml:",omitempty"` +} + +// WebIdentityToken - web identity token with expiry. +type WebIdentityToken struct { + Token string + AccessToken string + Expiry int +} + +// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSWebIdentity struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. 
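+	// NewSTSWebIdentity populates this with a client that uses
+	// http.DefaultTransport when one is not set explicitly.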
+ Client *http.Client + + // Exported STS endpoint to fetch STS credentials. + STSEndpoint string + + // Exported GetWebIDTokenExpiry function which returns ID + // tokens from IDP. This function should return two values + // one is ID token which is a self contained ID token (JWT) + // and second return value is the expiry associated with + // this token. + // This is a customer provided function and is mandatory. + GetWebIDTokenExpiry func() (*WebIdentityToken, error) + + // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is + // assuming. + RoleARN string + + // roleSessionName is the identifier for the assumed role session. + roleSessionName string +} + +// NewSTSWebIdentity returns a pointer to a new +// Credentials object wrapping the STSWebIdentity. +func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + return New(&STSWebIdentity{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + GetWebIDTokenExpiry: getWebIDTokenExpiry, + }), nil +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, + getWebIDTokenExpiry func() (*WebIdentityToken, error), +) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + if len(roleARN) > 0 { + v.Set("RoleArn", roleARN) + + if len(roleSessionName) == 0 { + roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) + } + v.Set("RoleSessionName", roleSessionName) + } + v.Set("WebIdentityToken", idToken.Token) + if idToken.AccessToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityAccessToken", idToken.AccessToken) + } + if idToken.Expiry > 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + } + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithWebIdentityResponse{}, errResp + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. 
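+//
+// Typical construction is through NewSTSWebIdentity (endpoint and token
+// file path are placeholders):
+//
+//	creds, err := NewSTSWebIdentity("https://minio.local:9000", func() (*WebIdentityToken, error) {
+//		token, err := ioutil.ReadFile("/var/run/secrets/token")
+//		if err != nil {
+//			return nil, err
+//		}
+//		return &WebIdentityToken{Token: string(token)}, nil
+//	})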
+func (m *STSWebIdentity) Retrieve() (Value, error) { + a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} + +// Expiration returns the expiration time of the credentials +func (m *STSWebIdentity) Expiration() time.Time { + return m.expiration +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go new file mode 100644 index 00000000..06e68e73 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -0,0 +1,198 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "errors" + "net/http" + + jsoniter "github.com/json-iterator/go" + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + sseGenericHeader = "X-Amz-Server-Side-Encryption" + + // sseKmsKeyID is the AWS SSE-KMS key id. + sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" + // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. + sseEncryptionContext = sseGenericHeader + "-Context" + + // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" + // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. + sseCustomerKey = sseGenericHeader + "-Customer-Key" + // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" + + // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). 
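+//
+// For example (password and salt are placeholders; a common convention is
+// to use the bucket name plus the object name as the salt):
+//
+//	sse := DefaultPBKDF([]byte("my-secret-password"), []byte(bucketName+objectName))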
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. +func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { + if context == nil { + return kms{key: keyID, hasContext: false}, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + serializedContext, err := json.Marshal(context) + if err != nil { + return nil, err + } + return kms{key: keyID, context: serializedContext, hasContext: true}, nil +} + +// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. +// The key must be 32 bytes long. +func NewSSEC(key []byte) (ServerSide, error) { + if len(key) != 32 { + return nil, errors.New("encrypt: SSE-C key must be 256 bit long") + } + sse := ssec{} + copy(sse[:], key) + return sse, nil +} + +// SSE transforms a SSE-C copy encryption into a SSE-C encryption. +// It is the inverse of SSECopy(...). +// +// If the provided sse is no SSE-C copy encryption SSE returns +// sse unmodified. +func SSE(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssecCopy); ok { + return ssec(sse) + } + return sse +} + +// SSECopy transforms a SSE-C encryption into a SSE-C copy +// encryption. This is required for SSE-C key rotation or a SSE-C +// copy where the source and the destination should be encrypted. +// +// If the provided sse is no SSE-C encryption SSECopy returns +// sse unmodified. 
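+//
+// A key-rotation sketch (oldKey and newKey are placeholder 32-byte keys):
+//
+//	src, _ := NewSSEC(oldKey) // key the object is currently encrypted with
+//	dst, _ := NewSSEC(newKey) // key to re-encrypt the object with
+//	// pass SSECopy(src) as the copy-source encryption and dst as the
+//	// destination encryption of a server-side copy operation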
+func SSECopy(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssec); ok { + return ssecCopy(sse) + } + return sse +} + +type ssec [32]byte + +func (s ssec) Type() Type { return SSEC } + +func (s ssec) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCustomerAlgorithm, "AES256") + h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type ssecCopy [32]byte + +func (s ssecCopy) Type() Type { return SSEC } + +func (s ssecCopy) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCopyCustomerAlgorithm, "AES256") + h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type s3 struct{} + +func (s s3) Type() Type { return S3 } + +func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } + +type kms struct { + key string + context []byte + hasContext bool +} + +func (s kms) Type() Type { return KMS } + +func (s kms) Marshal(h http.Header) { + h.Set(sseGenericHeader, "aws:kms") + if s.key != "" { + h.Set(sseKmsKeyID, s.key) + } + if s.hasContext { + h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go new file mode 100644 index 00000000..743d8eca --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -0,0 +1,458 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package lifecycle contains all the lifecycle related data types and marshallers. +package lifecycle + +import ( + "encoding/json" + "encoding/xml" + "errors" + "time" +) + +var errMissingStorageClass = errors.New("storage-class cannot be empty") + +// AbortIncompleteMultipartUpload structure, not supported yet on MinIO +type AbortIncompleteMultipartUpload struct { + XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` + DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { + return n.DaysAfterInitiation == ExpirationDays(0) +} + +// MarshalXML if days after initiation is set to non-zero value +func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.IsDaysNull() { + return nil + } + type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload + return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) +} + +// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. +// Upon expiration, server permanently deletes the noncurrent object versions. 
+// Set this lifecycle configuration action on a bucket that has versioning enabled +// (or suspended) to request server delete noncurrent object versions at a +// specific period in the object's lifetime. +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty"` +} + +// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions. +func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration + return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionExpiration) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +func (n NoncurrentVersionExpiration) isNull() bool { + return n.IsDaysNull() && n.NewerNoncurrentVersions == 0 +} + +// NoncurrentVersionTransition structure, set this action to request server to +// transition noncurrent object versions to different set storage classes +// at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionTransition) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +// IsStorageClassEmpty returns true if storage class field is empty +func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { + return n.StorageClass == "" +} + +func (n NoncurrentVersionTransition) isNull() bool { + return n.StorageClass == "" +} + +// UnmarshalJSON implements NoncurrentVersionTransition JSONify +func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { + type noncurrentVersionTransition NoncurrentVersionTransition + var nt noncurrentVersionTransition + err := json.Unmarshal(b, &nt) + if err != nil { + return err + } + + if nt.StorageClass == "" { + return errMissingStorageClass + } + *n = NoncurrentVersionTransition(nt) + return nil +} + +// MarshalXML is extended to leave out +// tags +func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionTransitionWrapper NoncurrentVersionTransition + return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) +} + +// Tag structure key/value pair representing an object tag to apply lifecycle configuration +type Tag struct { + XMLName xml.Name `xml:"Tag,omitempty" json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +// IsEmpty returns whether this tag is empty or not. 
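+// Only the key is inspected: a Tag with an empty Key is treated as
+// empty even if its Value is set.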
+func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Transition structure - transition details of lifecycle configuration +type Transition struct { + XMLName xml.Name `xml:"Transition" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + Days ExpirationDays `xml:"Days" json:"Days"` +} + +// UnmarshalJSON returns an error if storage-class is empty. +func (t *Transition) UnmarshalJSON(b []byte) error { + type transition Transition + var tr transition + err := json.Unmarshal(b, &tr) + if err != nil { + return err + } + + if tr.StorageClass == "" { + return errMissingStorageClass + } + *t = Transition(tr) + return nil +} + +// MarshalJSON customizes json encoding by omitting empty values +func (t Transition) MarshalJSON() ([]byte, error) { + if t.IsNull() { + return nil, nil + } + type transition struct { + Date *ExpirationDate `json:"Date,omitempty"` + StorageClass string `json:"StorageClass,omitempty"` + Days *ExpirationDays `json:"Days"` + } + + newt := transition{ + StorageClass: t.StorageClass, + } + + if !t.IsDateNull() { + newt.Date = &t.Date + } else { + newt.Days = &t.Days + } + return json.Marshal(newt) +} + +// IsDaysNull returns true if days field is null +func (t Transition) IsDaysNull() bool { + return t.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (t Transition) IsDateNull() bool { + return t.Date.Time.IsZero() +} + +// IsNull returns true if no storage-class is set. +func (t Transition) IsNull() bool { + return t.StorageClass == "" +} + +// MarshalXML is transition is non null +func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if t.IsNull() { + return nil + } + type transitionWrapper Transition + return en.EncodeElement(transitionWrapper(t), startElement) +} + +// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter +type And struct { + XMLName xml.Name `xml:"And" json:"-"` + Prefix string `xml:"Prefix" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag" json:"Tags,omitempty"` +} + +// IsEmpty returns true if Tags field is null +func (a And) IsEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Filter will be used in selecting rule(s) for lifecycle configuration +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// IsNull returns true if all Filter fields are empty. +func (f Filter) IsNull() bool { + return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" +} + +// MarshalJSON customizes json encoding by removing empty values. +func (f Filter) MarshalJSON() ([]byte, error) { + type filter struct { + And *And `json:"And,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Tag *Tag `json:"Tag,omitempty"` + } + + newf := filter{ + Prefix: f.Prefix, + } + if !f.Tag.IsEmpty() { + newf.Tag = &f.Tag + } + if !f.And.IsEmpty() { + newf.And = &f.And + } + return json.Marshal(newf) +} + +// MarshalXML - produces the xml representation of the Filter struct +// only one of Prefix, And and Tag should be present in the output. 
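+//
+// For example (illustrative):
+//
+//	Filter{Prefix: "logs/"}                    // <Filter><Prefix>logs/</Prefix></Filter>
+//	Filter{Tag: Tag{Key: "env", Value: "dev"}} // <Filter><Tag>...</Tag></Filter>
+//	// a prefix combined with tags must be wrapped in And:
+//	Filter{And: And{Prefix: "logs/", Tags: []Tag{{Key: "env", Value: "dev"}}}}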
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + switch { + case !f.And.IsEmpty(): + if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { + return err + } + case !f.Tag.IsEmpty(): + if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { + return err + } + default: + // Always print Prefix field when both And & Tag are empty + if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// ExpirationDays is a type alias to unmarshal Days in Expiration +type ExpirationDays int + +// MarshalXML encodes number of days to expire if it is non-zero and +// encodes empty string otherwise +func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDays == 0 { + return nil + } + return e.EncodeElement(int(eDays), startElement) +} + +// ExpirationDate is a embedded type containing time.Time to unmarshal +// Date in Expiration +type ExpirationDate struct { + time.Time +} + +// MarshalXML encodes expiration date if it is non-zero and encodes +// empty string otherwise +func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDate.Time.IsZero() { + return nil + } + return e.EncodeElement(eDate.Format(time.RFC3339), startElement) +} + +// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. +type ExpireDeleteMarker bool + +// MarshalXML encodes delete marker boolean into an XML form. +func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if !b { + return nil + } + type expireDeleteMarkerWrapper ExpireDeleteMarker + return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement) +} + +// IsEnabled returns true if the auto delete-marker expiration is enabled +func (b ExpireDeleteMarker) IsEnabled() bool { + return bool(b) +} + +// Expiration structure - expiration details of lifecycle configuration +type Expiration struct { + XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"` +} + +// MarshalJSON customizes json encoding by removing empty day/date specification. 
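+// Whichever of Days and Date is non-null is kept, and DeleteMarker is
+// always emitted; e.g. Expiration{Days: 30} encodes roughly as
+//
+//	{"Days":30,"DeleteMarker":false}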
+func (e Expiration) MarshalJSON() ([]byte, error) { + type expiration struct { + Date *ExpirationDate `json:"Date,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker + } + + newexp := expiration{ + DeleteMarker: e.DeleteMarker, + } + if !e.IsDaysNull() { + newexp.Days = &e.Days + } + if !e.IsDateNull() { + newexp.Date = &e.Date + } + return json.Marshal(newexp) +} + +// IsDaysNull returns true if days field is null +func (e Expiration) IsDaysNull() bool { + return e.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (e Expiration) IsDateNull() bool { + return e.Date.Time.IsZero() +} + +// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled +func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { + return e.DeleteMarker.IsEnabled() +} + +// IsNull returns true if both date and days fields are null +func (e Expiration) IsNull() bool { + return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() +} + +// MarshalXML is expiration is non null +func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if e.IsNull() { + return nil + } + type expirationWrapper Expiration + return en.EncodeElement(expirationWrapper(e), startElement) +} + +// MarshalJSON customizes json encoding by omitting empty values +func (r Rule) MarshalJSON() ([]byte, error) { + type rule struct { + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration *Expiration `json:"Expiration,omitempty"` + ID string `json:"ID"` + RuleFilter *Filter `json:"Filter,omitempty"` + NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Status string `json:"Status"` + Transition *Transition `json:"Transition,omitempty"` + } + newr := rule{ + Prefix: r.Prefix, + Status: r.Status, + ID: r.ID, + } + + if !r.RuleFilter.IsNull() { + newr.RuleFilter = &r.RuleFilter + } + if !r.AbortIncompleteMultipartUpload.IsDaysNull() { + newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload + } + if !r.Expiration.IsNull() { + newr.Expiration = &r.Expiration + } + if !r.Transition.IsNull() { + newr.Transition = &r.Transition + } + if !r.NoncurrentVersionExpiration.isNull() { + newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration + } + if !r.NoncurrentVersionTransition.isNull() { + newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition + } + + return json.Marshal(newr) +} + +// Rule represents a single rule in lifecycle configuration +type Rule struct { + XMLName xml.Name `xml:"Rule,omitempty" json:"-"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + ID string `xml:"ID" json:"ID"` + RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` + Prefix string `xml:"Prefix,omitempty" 
json:"Prefix,omitempty"` + Status string `xml:"Status" json:"Status"` + Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` +} + +// Configuration is a collection of Rule objects. +type Configuration struct { + XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` + Rules []Rule `xml:"Rule"` +} + +// Empty check if lifecycle configuration is empty +func (c *Configuration) Empty() bool { + if c == nil { + return true + } + return len(c.Rules) == 0 +} + +// NewConfiguration initializes a fresh lifecycle configuration +// for manipulation, such as setting and removing lifecycle rules +// and filters. +func NewConfiguration() *Configuration { + return &Configuration{} +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go new file mode 100644 index 00000000..d0a47163 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +// Indentity represents the user id, this is a compliance field. +type identity struct { + PrincipalID string `json:"principalId"` +} + +// event bucket metadata. +type bucketMeta struct { + Name string `json:"name"` + OwnerIdentity identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +// event object metadata. +type objectMeta struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + ContentType string `json:"contentType,omitempty"` + UserMetadata map[string]string `json:"userMetadata,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// event server specific metadata. +type eventMeta struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket bucketMeta `json:"bucket"` + Object objectMeta `json:"object"` +} + +// sourceInfo represents information on the client that +// triggered the event notification. +type sourceInfo struct { + Host string `json:"host"` + Port string `json:"port"` + UserAgent string `json:"userAgent"` +} + +// Event represents an Amazon an S3 bucket notification event. +type Event struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 eventMeta `json:"s3"` + Source sourceInfo `json:"source"` +} + +// Info - represents the collection of notification events, additionally +// also reports errors if any while listening on bucket notifications. 
+type Info struct {
+	Records []Event
+	Err     error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 00000000..75a1f609
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,397 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+
+	"github.com/minio/minio-go/v7/pkg/set"
+)
+
+// EventType is an S3 notification event associated with the bucket notification configuration
+type EventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+	ObjectCreatedAll                     EventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                               = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                              = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                              = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload           = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                              = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                             = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                              = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                               = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                            = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated               = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject              = "s3:ReducedRedundancyLostObject"
+	BucketCreatedAll                               = "s3:BucketCreated:*"
+	BucketRemovedAll                               = "s3:BucketRemoved:*"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+	Name  string `xml:"Name"`
+	Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+	S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service;
+// the ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+	Partition string
+	Service   string
+	Region    string
+	AccountID string
+	Resource  string
+}
+
+// NewArn creates new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+	return Arn{
+		Partition: partition,
+		Service:   service,
+		Region:    region,
+		AccountID: accountID,
+		Resource:  resource,
+	}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
+// Config - represents one single
notification configuration +// such as topic, queue or lambda configuration. +type Config struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []EventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewConfig creates one notification config and sets the given ARN +func NewConfig(arn Arn) Config { + return Config{Arn: arn, Filter: &Filter{}} +} + +// AddEvents adds one event to the current notification config +func (t *Config) AddEvents(events ...EventType) { + t.Events = append(t.Events, events...) +} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *Config) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *Config) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// EqualEventTypeList tells whether a and b contain the same events +func EqualEventTypeList(a, b []EventType) bool { + if len(a) != len(b) { + return false + } + setA := set.NewStringSet() + for _, i := range a { + setA.Add(string(i)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(string(i)) + } + + return setA.Difference(setB).IsEmpty() +} + +// EqualFilterRuleList tells whether a and b contain the same filters +func EqualFilterRuleList(a, b []FilterRule) bool { + if len(a) != len(b) { + return false + } + + setA := set.NewStringSet() + for _, i := range a { + setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + return setA.Difference(setB).IsEmpty() +} + +// Equal returns whether this `Config` is equal to another defined by the passed parameters +func (t *Config) Equal(events []EventType, prefix, suffix string) bool { + if t == nil { + return false + } + + // Compare events + passEvents := EqualEventTypeList(t.Events, events) + + // Compare filters + var newFilterRules []FilterRule + if prefix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) + } + if suffix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) + } + + var currentFilterRules []FilterRule + if t.Filter != nil { + currentFilterRules = t.Filter.S3Key.FilterRules + } + + passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) + return passEvents && passFilters +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + Config + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + Config + Queue string 
`xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+	Config
+	Lambda string `xml:"CloudFunction"`
+}
+
+// Configuration - the struct that represents the whole XML to be sent to the web service
+type Configuration struct {
+	XMLName       xml.Name       `xml:"NotificationConfiguration"`
+	LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+	TopicConfigs  []TopicConfig  `xml:"TopicConfiguration"`
+	QueueConfigs  []QueueConfig  `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *Configuration) AddTopic(topicConfig Config) bool {
+	newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
+	for _, n := range b.TopicConfigs {
+		// If new config matches existing one
+		if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
+
+			existingConfig := set.NewStringSet()
+			for _, v := range n.Events {
+				existingConfig.Add(string(v))
+			}
+
+			newConfig := set.NewStringSet()
+			for _, v := range topicConfig.Events {
+				newConfig.Add(string(v))
+			}
+
+			if !newConfig.Intersection(existingConfig).IsEmpty() {
+				return false
+			}
+		}
+	}
+	b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+	return true
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *Configuration) AddQueue(queueConfig Config) bool {
+	newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
+	for _, n := range b.QueueConfigs {
+		if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
+
+			existingConfig := set.NewStringSet()
+			for _, v := range n.Events {
+				existingConfig.Add(string(v))
+			}
+
+			newConfig := set.NewStringSet()
+			for _, v := range queueConfig.Events {
+				newConfig.Add(string(v))
+			}
+
+			if !newConfig.Intersection(existingConfig).IsEmpty() {
+				return false
+			}
+		}
+	}
+	b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+	return true
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *Configuration) AddLambda(lambdaConfig Config) bool {
+	newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+	for _, n := range b.LambdaConfigs {
+		if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
+
+			existingConfig := set.NewStringSet()
+			for _, v := range n.Events {
+				existingConfig.Add(string(v))
+			}
+
+			newConfig := set.NewStringSet()
+			for _, v := range lambdaConfig.Events {
+				newConfig.Add(string(v))
+			}
+
+			if !newConfig.Intersection(existingConfig).IsEmpty() {
+				return false
+			}
+		}
+	}
+	b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+	return true
+}
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *Configuration) RemoveTopicByArn(arn Arn) {
+	var topics []TopicConfig
+	for _, topic := range b.TopicConfigs {
+		if topic.Topic != arn.String() {
+			topics = append(topics, topic)
+		}
+	}
+	b.TopicConfigs = topics
+}
+
+// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete
+var ErrNoConfigMatch = errors.New("no notification configuration matched")
+
+// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+	removeIndex := -1
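+	// A sentinel of -1 means no match was found; the loop below records the
+	// index of the first configuration whose ARN, events, prefix and suffix
+	// all match, which is also the only possible match.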
+	for i, v := range b.TopicConfigs {
+		// if it matches events and filters, mark the index for deletion
+		if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
+			removeIndex = i
+			break // since we have at most one matching config
+		}
+	}
+	if removeIndex >= 0 {
+		b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
+		return nil
+	}
+	return ErrNoConfigMatch
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *Configuration) RemoveQueueByArn(arn Arn) {
+	var queues []QueueConfig
+	for _, queue := range b.QueueConfigs {
+		if queue.Queue != arn.String() {
+			queues = append(queues, queue)
+		}
+	}
+	b.QueueConfigs = queues
+}
+
+// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+	removeIndex := -1
+	for i, v := range b.QueueConfigs {
+		// if it matches events and filters, mark the index for deletion
+		if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
+			removeIndex = i
+			break // since we have at most one matching config
+		}
+	}
+	if removeIndex >= 0 {
+		b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
+		return nil
+	}
+	return ErrNoConfigMatch
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *Configuration) RemoveLambdaByArn(arn Arn) {
+	var lambdas []LambdaConfig
+	for _, lambda := range b.LambdaConfigs {
+		if lambda.Lambda != arn.String() {
+			lambdas = append(lambdas, lambda)
+		}
+	}
+	b.LambdaConfigs = lambdas
+}
+
+// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix
+func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
+	removeIndex := -1
+	for i, v := range b.LambdaConfigs {
+		// if it matches events and filters, mark the index for deletion
+		if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
+			removeIndex = i
+			break // since we have at most one matching config
+		}
+	}
+	if removeIndex >= 0 {
+		b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
+		return nil
+	}
+	return ErrNoConfigMatch
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
new file mode 100644
index 00000000..97abf8df
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -0,0 +1,746 @@
+/*
+ * MinIO Client (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package replication + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/rs/xid" +) + +var errInvalidFilter = fmt.Errorf("invalid filter") + +// OptionType specifies operation to be performed on config +type OptionType string + +const ( + // AddOption specifies addition of rule to config + AddOption OptionType = "Add" + // SetOption specifies modification of existing rule to config + SetOption OptionType = "Set" + + // RemoveOption specifies rule options are for removing a rule + RemoveOption OptionType = "Remove" + // ImportOption is for getting current config + ImportOption OptionType = "Import" +) + +// Options represents options to set a replication configuration rule +type Options struct { + Op OptionType + RoleArn string + ID string + Prefix string + RuleStatus string + Priority string + TagString string + StorageClass string + DestBucket string + IsTagSet bool + IsSCSet bool + ReplicateDeletes string // replicate versioned deletes + ReplicateDeleteMarkers string // replicate soft deletes + ReplicaSync string // replicate replica metadata modifications + ExistingObjectReplicate string +} + +// Tags returns a slice of tags for a rule +func (opts Options) Tags() ([]Tag, error) { + var tagList []Tag + tagTokens := strings.Split(opts.TagString, "&") + for _, tok := range tagTokens { + if tok == "" { + break + } + kv := strings.SplitN(tok, "=", 2) + if len(kv) != 2 { + return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") + } + tagList = append(tagList, Tag{ + Key: kv[0], + Value: kv[1], + }) + } + return tagList, nil +} + +// Config - replication configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type Config struct { + XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` + Rules []Rule `xml:"Rule" json:"Rules"` + Role string `xml:"Role" json:"Role"` +} + +// Empty returns true if config is not set +func (c *Config) Empty() bool { + return len(c.Rules) == 0 +} + +// AddRule adds a new rule to existing replication config. If a rule exists with the +// same ID, then the rule is replaced. +func (c *Config) AddRule(opts Options) error { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite + if opts.RoleArn != "" { + tokens := strings.Split(opts.RoleArn, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) + } + switch { + case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: + c.Role = opts.RoleArn + compatSw = true + case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): + c.Role = opts.RoleArn + default: + return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) + } + } + + var status Status + // toggle rule status for edit option + switch opts.RuleStatus { + case "enable": + status = Enabled + case "disable": + status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + + tags, err := opts.Tags() + if err != nil { + return err + } + andVal := And{ + Tags: tags, + } + filter := Filter{Prefix: opts.Prefix} + // only a single tag is set. 
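+	// An S3 replication filter takes exactly one of three shapes: a bare
+	// Prefix, a bare Tag, or an And element combining a prefix with one or
+	// more tags; the two branches below pick whichever shape matches.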
+ if opts.Prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || opts.Prefix != "" { + filter.And = andVal + filter.And.Prefix = opts.Prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + if opts.ID == "" { + opts.ID = xid.New().String() + } + + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { + if len(btokens) == 1 && compatSw { + destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) + } else { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + } + dmStatus := Disabled + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + dmStatus = Enabled + case "disable": + dmStatus = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") + } + } + + vDeleteStatus := Disabled + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + vDeleteStatus = Enabled + case "disable": + vDeleteStatus = Disabled + default: + return fmt.Errorf("ReplicateDeletes should be either enable|disable") + } + } + var replicaSync Status + // replica sync is by default Enabled, unless specified. + switch opts.ReplicaSync { + case "enable", "": + replicaSync = Enabled + case "disable": + replicaSync = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + + var existingStatus Status + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + existingStatus = Enabled + case "disable", "": + existingStatus = Disabled + default: + return fmt.Errorf("existingObjectReplicate should be either enable|disable") + } + } + newRule := Rule{ + ID: opts.ID, + Priority: priority, + Status: status, + Filter: filter, + Destination: Destination{ + Bucket: destBucket, + StorageClass: opts.StorageClass, + }, + DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, + DeleteReplication: DeleteReplication{Status: vDeleteStatus}, + // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow + // automatic failover as the expectation in this case is that replica and source should be identical. + // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html + SourceSelectionCriteria: SourceSelectionCriteria{ + ReplicaModifications: ReplicaModifications{ + Status: replicaSync, + }, + }, + // By default disable existing object replication unless selected + ExistingObjectReplication: ExistingObjectReplication{ + Status: existingStatus, + }, + } + + // validate rule after overlaying priority for pre-existing rule being disabled. + if err := newRule.Validate(); err != nil { + return err + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + for _, rule := range c.Rules { + if rule.Priority == newRule.Priority { + return fmt.Errorf("priority must be unique. 
Replication configuration already has a rule with this priority") + } + if rule.ID == newRule.ID { + return fmt.Errorf("a rule exists with this ID") + } + } + + c.Rules = append(c.Rules, newRule) + return nil +} + +// EditRule modifies an existing rule in replication config +func (c *Config) EditRule(opts Options) error { + if opts.ID == "" { + return fmt.Errorf("rule ID missing") + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + rIdx := -1 + var newRule Rule + for i, rule := range c.Rules { + if rule.ID == opts.ID { + rIdx = i + newRule = rule + break + } + } + if rIdx < 0 { + return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) + } + prefixChg := opts.Prefix != newRule.Prefix() + if opts.IsTagSet || prefixChg { + prefix := newRule.Prefix() + if prefix != opts.Prefix { + prefix = opts.Prefix + } + tags := []Tag{newRule.Filter.Tag} + if len(newRule.Filter.And.Tags) != 0 { + tags = newRule.Filter.And.Tags + } + var err error + if opts.IsTagSet { + tags, err = opts.Tags() + if err != nil { + return err + } + } + andVal := And{ + Tags: tags, + } + + filter := Filter{Prefix: prefix} + // only a single tag is set. + if prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || prefix != "" { + filter.And = andVal + filter.And.Prefix = prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + newRule.Filter = filter + } + + // toggle rule status for edit option + if opts.RuleStatus != "" { + switch opts.RuleStatus { + case "enable": + newRule.Status = Enabled + case "disable": + newRule.Status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + } + // set DeleteMarkerReplication rule status for edit option + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + newRule.DeleteMarkerReplication.Status = Enabled + case "disable": + newRule.DeleteMarkerReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") + } + } + + // set DeleteReplication rule status for edit option. 
This is a MinIO specific + // option to replicate versioned deletes + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + newRule.DeleteReplication.Status = Enabled + case "disable": + newRule.DeleteReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") + } + } + + if opts.ReplicaSync != "" { + switch opts.ReplicaSync { + case "enable", "": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled + case "disable": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + } + + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + newRule.ExistingObjectReplication.Status = Enabled + case "disable": + newRule.ExistingObjectReplication.Status = Disabled + default: + return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") + } + } + if opts.IsSCSet { + newRule.Destination.StorageClass = opts.StorageClass + } + if opts.Priority != "" { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + newRule.Priority = priority + } + if opts.DestBucket != "" { + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + newRule.Destination.Bucket = destBucket + } + // validate rule + if err := newRule.Validate(); err != nil { + return err + } + // ensure priority and destination bucket restrictions are not violated + for idx, rule := range c.Rules { + if rule.Priority == newRule.Priority && rIdx != idx { + return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") + } + if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { + return fmt.Errorf("invalid destination bucket for this rule") + } + } + + c.Rules[rIdx] = newRule + return nil +} + +// RemoveRule removes a rule from replication config. +func (c *Config) RemoveRule(opts Options) error { + var newRules []Rule + ruleFound := false + for _, rule := range c.Rules { + if rule.ID != opts.ID { + newRules = append(newRules, rule) + continue + } + ruleFound = true + } + if !ruleFound { + return fmt.Errorf("Rule with ID %s not found", opts.ID) + } + if len(newRules) == 0 { + return fmt.Errorf("replication configuration should have at least one rule") + } + c.Rules = newRules + return nil +} + +// Rule - a rule for replication configuration. 
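+//
+// Illustrative sketch only (not upstream code): rules are normally
+// assembled through Config.AddRule rather than built by hand. Every value
+// below is hypothetical; the destination bucket is given in ARN form.
+//
+//	var cfg Config
+//	err := cfg.AddRule(Options{
+//		Priority:   "1",
+//		RuleStatus: "enable",
+//		Prefix:     "docs/",
+//		DestBucket: "arn:aws:s3:::backup-bucket",
+//	})
+//	// handle err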
+type Rule struct {
+	XMLName                   xml.Name                  `xml:"Rule" json:"-"`
+	ID                        string                    `xml:"ID,omitempty"`
+	Status                    Status                    `xml:"Status"`
+	Priority                  int                       `xml:"Priority"`
+	DeleteMarkerReplication   DeleteMarkerReplication   `xml:"DeleteMarkerReplication"`
+	DeleteReplication         DeleteReplication         `xml:"DeleteReplication"`
+	Destination               Destination               `xml:"Destination"`
+	Filter                    Filter                    `xml:"Filter" json:"Filter"`
+	SourceSelectionCriteria   SourceSelectionCriteria   `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+	ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
+}
+
+// Validate validates the rule for correctness
+func (r Rule) Validate() error {
+	if err := r.validateID(); err != nil {
+		return err
+	}
+	if err := r.validateStatus(); err != nil {
+		return err
+	}
+	if err := r.validateFilter(); err != nil {
+		return err
+	}
+
+	if r.Priority < 0 && r.Status == Enabled {
+		return fmt.Errorf("priority must be set for the rule")
+	}
+
+	if err := r.validateStatus(); err != nil {
+		return err
+	}
+	return r.ExistingObjectReplication.Validate()
+}
+
+// validateID - checks if ID is valid or not.
+func (r Rule) validateID() error {
+	// cannot be longer than 255 characters
+	if len(r.ID) > 255 {
+		return fmt.Errorf("ID must not be longer than 255 characters")
+	}
+	return nil
+}
+
+// validateStatus - checks if status is valid or not.
+func (r Rule) validateStatus() error {
+	// Status can't be empty
+	if len(r.Status) == 0 {
+		return fmt.Errorf("status cannot be empty")
+	}
+
+	// Status must be one of Enabled or Disabled
+	if r.Status != Enabled && r.Status != Disabled {
+		return fmt.Errorf("status must be set to either Enabled or Disabled")
+	}
+	return nil
+}
+
+func (r Rule) validateFilter() error {
+	return r.Filter.Validate()
+}
+
+// Prefix - a rule can either have a prefix under <Filter> or under
+// <Filter><And>. This method returns the prefix from the
+// location where it is available.
+func (r Rule) Prefix() string {
+	if r.Filter.Prefix != "" {
+		return r.Filter.Prefix
+	}
+	return r.Filter.And.Prefix
+}
+
+// Tags - a rule can either have tags under <Filter><Tag> or under
+// <Filter><And>. This method returns all the tags from the
+// rule in the format tag1=value1&tag2=value2
+func (r Rule) Tags() string {
+	ts := []Tag{r.Filter.Tag}
+	if len(r.Filter.And.Tags) != 0 {
+		ts = r.Filter.And.Tags
+	}
+
+	var buf bytes.Buffer
+	for _, t := range ts {
+		if buf.Len() > 0 {
+			buf.WriteString("&")
+		}
+		buf.WriteString(t.String())
+	}
+	return buf.String()
+}
+
+// Filter - a filter for a replication configuration Rule.
+type Filter struct {
+	XMLName xml.Name `xml:"Filter" json:"-"`
+	Prefix  string   `json:"Prefix,omitempty"`
+	And     And      `xml:"And,omitempty" json:"And,omitempty"`
+	Tag     Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// Validate - validates the filter element
+func (f Filter) Validate() error {
+	// A Filter must have exactly one of Prefix, Tag, or And specified.
+	if !f.And.isEmpty() {
+		if f.Prefix != "" {
+			return errInvalidFilter
+		}
+		if !f.Tag.IsEmpty() {
+			return errInvalidFilter
+		}
+	}
+	if f.Prefix != "" {
+		if !f.Tag.IsEmpty() {
+			return errInvalidFilter
+		}
+	}
+	if !f.Tag.IsEmpty() {
+		if err := f.Tag.Validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Tag - a tag for a replication configuration Rule filter.
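+//
+// Note on Filter.Validate above: the fields are mutually exclusive, so
+// Filter{Prefix: "logs/", Tag: Tag{Key: "k", Value: "v"}} is rejected with
+// errInvalidFilter, while either field on its own is accepted.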
+type Tag struct { + XMLName xml.Name `json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +func (tag Tag) String() string { + if tag.IsEmpty() { + return "" + } + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { + return fmt.Errorf("invalid Tag Key") + } + + if utf8.RuneCountInString(tag.Value) > 256 { + return fmt.Errorf("invalid Tag Value") + } + return nil +} + +// Destination - destination in ReplicationConfiguration. +type Destination struct { + XMLName xml.Name `xml:"Destination" json:"-"` + Bucket string `xml:"Bucket" json:"Bucket"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` +} + +// And - a tag to combine a prefix and multiple tags for replication configuration rule. +type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// isEmpty returns true if Tags field is null +func (a And) isEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Status represents Enabled/Disabled status +type Status string + +// Supported status types +const ( + Enabled Status = "Enabled" + Disabled Status = "Disabled" +) + +// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type DeleteMarkerReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (d DeleteMarkerReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// DeleteReplication - whether versioned deletes are replicated - this +// is a MinIO specific extension +type DeleteReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteReplication is not set +func (d DeleteReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// ReplicaModifications specifies if replica modification sync is enabled +type ReplicaModifications struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default +} + +// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. +type SourceSelectionCriteria struct { + ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` +} + +// IsValid - checks whether SourceSelectionCriteria is valid or not. 
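+// Only the two concrete statuses pass; the zero value ("") does not, which
+// is why Validate below special-cases the empty struct before calling it.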
+func (s SourceSelectionCriteria) IsValid() bool {
+	return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
+}
+
+// Validate source selection criteria
+func (s SourceSelectionCriteria) Validate() error {
+	if (s == SourceSelectionCriteria{}) {
+		return nil
+	}
+	if !s.IsValid() {
+		return fmt.Errorf("invalid ReplicaModification status")
+	}
+	return nil
+}
+
+// ExistingObjectReplication - whether existing object replication is enabled
+type ExistingObjectReplication struct {
+	Status Status `xml:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if ExistingObjectReplication is not set
+func (e ExistingObjectReplication) IsEmpty() bool {
+	return len(e.Status) == 0
+}
+
+// Validate checks that the status, when set, is either Enabled or Disabled.
+func (e ExistingObjectReplication) Validate() error {
+	if e.IsEmpty() {
+		return nil
+	}
+	if e.Status != Disabled && e.Status != Enabled {
+		return fmt.Errorf("invalid ExistingObjectReplication status")
+	}
+	return nil
+}
+
+// TargetMetrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket remote target
+type TargetMetrics struct {
+	// Pending size in bytes
+	PendingSize uint64 `json:"pendingReplicationSize"`
+	// Completed size in bytes
+	ReplicatedSize uint64 `json:"completedReplicationSize"`
+	// Total Replica size in bytes
+	ReplicaSize uint64 `json:"replicaSize"`
+	// Failed size in bytes
+	FailedSize uint64 `json:"failedReplicationSize"`
+	// Total number of pending operations including metadata updates
+	PendingCount uint64 `json:"pendingReplicationCount"`
+	// Total number of failed operations including metadata updates
+	FailedCount uint64 `json:"failedReplicationCount"`
+}
+
+// Metrics represents inline replication metrics for a bucket.
+type Metrics struct {
+	Stats map[string]TargetMetrics
+	// Total Pending size in bytes across targets
+	PendingSize uint64 `json:"pendingReplicationSize"`
+	// Completed size in bytes across targets
+	ReplicatedSize uint64 `json:"completedReplicationSize"`
+	// Total Replica size in bytes across targets
+	ReplicaSize uint64 `json:"replicaSize"`
+	// Failed size in bytes across targets
+	FailedSize uint64 `json:"failedReplicationSize"`
+	// Total number of pending operations including metadata updates across targets
+	PendingCount uint64 `json:"pendingReplicationCount"`
+	// Total number of failed operations including metadata updates across targets
+	FailedCount uint64 `json:"failedReplicationCount"`
+}
+
+// ResyncTargetsInfo provides replication target information to resync replicated data.
+type ResyncTargetsInfo struct {
+	Targets []ResyncTarget `json:"target,omitempty"`
+}
+
+// ResyncTarget provides the replica resources and resetID to initiate resync replication.
+type ResyncTarget struct {
+	Arn       string    `json:"arn"`
+	ResetID   string    `json:"resetid"`
+	StartTime time.Time `json:"startTime,omitempty"`
+	EndTime   time.Time `json:"endTime,omitempty"`
+	// Status of resync operation
+	ResyncStatus string `json:"resyncStatus,omitempty"`
+	// Completed size in bytes
+	ReplicatedSize int64 `json:"completedReplicationSize,omitempty"`
+	// Failed size in bytes
+	FailedSize int64 `json:"failedReplicationSize,omitempty"`
+	// Total number of failed operations
+	FailedCount int64 `json:"failedReplicationCount,omitempty"`
+	// Total number of replicated operations
+	ReplicatedCount int64 `json:"replicationCount,omitempty"`
+	// Last bucket/object replicated.
+	Bucket string `json:"bucket,omitempty"`
+	Object string `json:"object,omitempty"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 00000000..2f1a5a65
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,408 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"net"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+	// See RFC 1035, RFC 3696.
+	host = strings.TrimSpace(host)
+	if len(host) == 0 || len(host) > 255 {
+		return false
+	}
+	// host cannot start or end with "-"
+	if host[len(host)-1:] == "-" || host[:1] == "-" {
+		return false
+	}
+	// host cannot start or end with "_"
+	if host[len(host)-1:] == "_" || host[:1] == "_" {
+		return false
+	}
+	// host cannot start with a "."
+	if host[:1] == "." {
+		return false
+	}
+	// All non alphanumeric characters are invalid.
+	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+		return false
+	}
+	// No need to regexp match, since the list is non-exhaustive.
+	// We let it be valid and fail later.
+	return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	// bucketName can be valid but '.' in the hostname will fail SSL
+	// certificate validation. So do not use host-style for such buckets.
+	if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+		return false
+	}
+	// Return true for all other cases
+	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
+}
+
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
+
+// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
+var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)
+
+// Regular expression used to determine if the arg is elb host.
+var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
+
+// Regular expression used to determine if the arg is elb host in china.
+var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
+
+// amazonS3HostPrivateLink - regular expression used to determine if an arg is s3 host in AWS PrivateLink interface endpoints style.
+var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+	if endpointURL == sentinelURL {
+		return ""
+	}
+	if endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" {
+		return "us-gov-west-1"
+	}
+	// if elb's are used we cannot calculate which region it may be, just return empty.
+	if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
+		return ""
+	}
+	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
+func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
+	return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+	if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+		return true
+	}
+	return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+		IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+// See https://aws.amazon.com/compliance/fips.
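+//
+// Illustrative sketch only (not upstream code) for the endpoint predicates
+// above:
+//
+//	u, _ := url.Parse("https://s3.us-west-2.amazonaws.com")
+//	IsAmazonEndpoint(*u) // true
+//	GetRegionFromURL(*u) // "us-west-2"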
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" +} + +// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + switch endpointURL.Host { + case "s3-fips.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-west-1.amazonaws.com": + case "s3-fips.dualstack.us-west-2.amazonaws.com": + case "s3-fips.dualstack.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-east-1.amazonaws.com": + case "s3-fips.us-west-1.amazonaws.com": + case "s3-fips.us-west-2.amazonaws.com": + case "s3-fips.us-east-1.amazonaws.com": + default: + return false + } + return true +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) +} + +// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint +// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. +func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return amazonS3HostPrivateLink.MatchString(endpointURL.Host) +} + +// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. +func IsGoogleEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "storage.googleapis.com" +} + +// Expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.ReplaceAll(s, "/", "%2F") +} + +// QueryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// TagDecode - decodes canonical tag into map of key and value. +func TagDecode(ctag string) map[string]string { + if ctag == "" { + return map[string]string{} + } + tags := strings.Split(ctag, "&") + tagMap := make(map[string]string, len(tags)) + var err error + for _, tag := range tags { + kvs := strings.SplitN(tag, "=", 2) + if len(kvs) == 0 { + return map[string]string{} + } + if len(kvs) == 1 { + return map[string]string{} + } + tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) + if err != nil { + continue + } + } + return tagMap +} + +// TagEncode - encodes tag values in their URL encoded form. 
In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func TagEncode(tags map[string]string) string { + if tags == nil { + return "" + } + values := url.Values{} + for k, v := range tags { + values[k] = []string{v} + } + return QueryEncode(values) +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname strings.Builder + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname.WriteString("%" + strings.ToUpper(hex)) + } + } + } + return encodedPathname.String() +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. 
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be longer than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go new file mode 100644 index 00000000..c35e58e1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -0,0 +1,200 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "fmt" + "sort" + + jsoniter "github.com/json-iterator/go" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. 
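+//
+// Illustrative sketch only (not upstream code), assuming the standard
+// strings package is imported:
+//
+//	s := CreateStringSet("alpha", "beta", "axiom")
+//	a := s.FuncMatch(strings.HasPrefix, "a") // yields {"alpha", "axiom"}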
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. + for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []string{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. 
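+//
+// Illustrative sketch of the set operations defined above (not upstream
+// code):
+//
+//	a := CreateStringSet("x", "y")
+//	b := CreateStringSet("y", "z")
+//	a.Intersection(b).ToSlice() // []string{"y"}
+//	a.Difference(b).ToSlice()   // []string{"x"}
+//	a.Union(b).ToSlice()        // []string{"x", "y", "z"}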
+func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go new file mode 100644 index 00000000..b1296d2b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -0,0 +1,306 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. 
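+	// For chunked uploads this slot carries the fixed marker
+	// STREAMING-AWS4-HMAC-SHA256-PAYLOAD rather than a real payload hash,
+	// signalling to S3 that every chunk carries its own signature.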
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// getSeedSignature - returns the seed signature for a given request. +func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +} + +// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingReader struct { + accessKeyID string + secretAccessKey string + sessionToken string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int +} + +// signChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int) { + // Compute chunk signature for next header + signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + s.buf.Write([]byte("\r\n")) + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. 
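+// The assembled value has the shape (per the AWS SigV4 streaming reference):
+//
+//	AWS4-HMAC-SHA256 Credential=<access-key>/<scope>,SignedHeaders=<headers>,Signature=<seed-signature>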
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+	credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
+	authParts := []string{
+		signV4Algorithm + " Credential=" + credential,
+		"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+		"Signature=" + s.seedSignature,
+	}
+
+	// Set authorization header.
+	auth := strings.Join(authParts, ",")
+	req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+	region string, dataLen int64, reqTime time.Time,
+) *http.Request {
+	// Set headers needed for streaming signature.
+	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+	if req.Body == nil {
+		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+	}
+
+	stReader := &StreamingReader{
+		baseReadCloser:  req.Body,
+		accessKeyID:     accessKeyID,
+		secretAccessKey: secretAccessKey,
+		sessionToken:    sessionToken,
+		region:          region,
+		reqTime:         reqTime,
+		chunkBuf:        make([]byte, payloadChunkSize),
+		contentLen:      dataLen,
+		chunkNum:        1,
+		totalChunks:     int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+		lastChunkSize:   int(dataLen % payloadChunkSize),
+	}
+
+	// Add the request headers required for chunk upload signing.
+
+	// Compute the seed signature.
+	stReader.setSeedSignature(req)
+
+	// Set the authorization header with the seed signature.
+	stReader.setStreamingAuthHeader(req)
+
+	// Set seed signature as prevSignature for subsequent
+	// streaming signing process.
+	stReader.prevSignature = stReader.seedSignature
+	req.Body = stReader
+
+	return req
+}
+
+// Read - this method performs chunk upload signature providing an
+// io.Reader interface.
+func (s *StreamingReader) Read(buf []byte) (int, error) {
+	switch {
+	// After the last chunk is read from underlying reader, we
+	// never re-fill s.buf.
+	case s.done:
+
+	// s.buf will be (re-)filled with the next chunk when it has fewer
+	// bytes than asked for.
+	case s.buf.Len() < len(buf):
+		s.chunkBufLen = 0
+		for {
+			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+			// Usually we validate `err` first, but in this case
+			// we are validating n > 0 for the following reasons.
+			//
+			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
+			// A Reader returning a non-zero number of bytes at the end
+			// of the input stream may return either err == EOF or err == nil
+			//
+			// 2. n == 0, err is io.EOF (actual end of stream)
+			//
+			// Callers should always process the n > 0 bytes returned
+			// before considering the error err.
+			if n1 > 0 {
+				s.chunkBufLen += n1
+				s.bytesRead += int64(n1)
+
+				if s.chunkBufLen == payloadChunkSize ||
+					(s.chunkNum == s.totalChunks-1 &&
+						s.chunkBufLen == s.lastChunkSize) {
+					// Sign the chunk and write it to s.buf.
+					s.signChunk(s.chunkBufLen)
+					break
+				}
+			}
+			if err != nil {
+				if err == io.EOF {
+					// No more data left in baseReader - last chunk.
+					// Done reading the last chunk from baseReader.
+					s.done = true
+
+					// bytes read from baseReader different than
+					// content length provided.
+					if s.bytesRead != s.contentLen {
+						return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
+					}
+
+					// Sign the chunk and write it to s.buf.
+					s.signChunk(0)
+					break
+				}
+				return 0, err
+			}
+
+		}
+	}
+	return s.buf.Read(buf)
+}
+
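For context, a minimal sketch of wiring this signer onto an upload request. The endpoint and credentials are placeholders; in practice the minio Client invokes this internally rather than callers doing it by hand:

```go
package main

import (
	"bytes"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	payload := []byte("hello world")
	req, _ := http.NewRequest(http.MethodPut,
		"https://play.min.io/my-bucket/my-object", bytes.NewReader(payload))

	// Wraps req.Body in a StreamingReader and sets the seed-signature
	// headers; each 64 KiB chunk is then signed lazily as the
	// transport reads the body.
	req = signer.StreamingSignV4(req, "ACCESS-KEY", "SECRET-KEY", "",
		"us-east-1", int64(len(payload)), time.Now().UTC())

	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}
```

+// Close - this method makes underlying io.ReadCloser's Close method available.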
+func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go new file mode 100644 index 00000000..cf7921d1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -0,0 +1,316 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return + } + } + path = s3utils.EncodePath(req.URL.Path) + return +} + +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + d := time.Now().UTC() + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. + stringToSign := preStringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + + // Encode query and save. + req.URL.RawQuery = s3utils.QueryEncode(query) + + // Save signature finally. + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + + // Return. + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. 
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(policyBase64))
+	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+	return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	d := time.Now().UTC()
+
+	// Add date if not present.
+	if date := req.Header.Get("Date"); date == "" {
+		req.Header.Set("Date", d.Format(http.TimeFormat))
+	}
+
+	// Calculate HMAC for secretAccessKey.
+	stringToSign := stringToSignV2(req, virtualHost)
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(stringToSign))
+
+	// Prepare auth header.
+	authHeader := new(bytes.Buffer)
+	authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+	encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+	encoder.Write(hm.Sum(nil))
+	encoder.Close()
+
+	// Set Authorization header.
+	req.Header.Set("Authorization", authHeader.String())
+
+	return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Expires + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+func preStringToSignV2(req http.Request, virtualHost bool) string {
+	buf := new(bytes.Buffer)
+	// Write standard headers.
+	writePreSignV2Headers(buf, req)
+	// Write canonicalized protocol headers if any.
+	writeCanonicalizedHeaders(buf, req)
+	// Write canonicalized Query resources if any.
+	writeCanonicalizedResource(buf, req, virtualHost)
+	return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+	buf.WriteString(req.Method + "\n")
+	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+	buf.WriteString(req.Header.Get("Content-Type") + "\n")
+	buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+func stringToSignV2(req http.Request, virtualHost bool) string {
+	buf := new(bytes.Buffer)
+	// Write standard headers.
+	writeSignV2Headers(buf, req)
+	// Write canonicalized protocol headers if any.
+	writeCanonicalizedHeaders(buf, req)
+	// Write canonicalized Query resources if any.
+	writeCanonicalizedResource(buf, req, virtualHost)
+	return buf.String()
+}
+
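A short sketch of the presign flow this file implements, generating a V2 presigned GET URL; the bucket and keys are placeholders and expiry is in seconds:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"https://my-bucket.s3.amazonaws.com/my-object", nil)

	// PreSignV2 appends AWSAccessKeyId, Expires and Signature query
	// parameters; 900 seconds = 15 minutes of validity.
	signed := signer.PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 900, true)
	fmt.Println(signed.URL.String())
}
```

+// writeSignV2Headers - write signV2 required headers.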
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+	buf.WriteString(req.Method + "\n")
+	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+	buf.WriteString(req.Header.Get("Content-Type") + "\n")
+	buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+	var protoHeaders []string
+	vals := make(map[string][]string)
+	for k, vv := range req.Header {
+		// All the AMZ headers should be lowercase
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-amz") {
+			protoHeaders = append(protoHeaders, lk)
+			vals[lk] = vv
+		}
+	}
+	sort.Strings(protoHeaders)
+	for _, k := range protoHeaders {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		for idx, v := range vals[k] {
+			if idx > 0 {
+				buf.WriteByte(',')
+			}
+			buf.WriteString(v)
+		}
+		buf.WriteByte('\n')
+	}
+}
+
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+//
+// This list should be kept alphabetically sorted, do not hastily edit.
+var resourceList = []string{
+	"acl",
+	"cors",
+	"delete",
+	"encryption",
+	"legal-hold",
+	"lifecycle",
+	"location",
+	"logging",
+	"notification",
+	"partNumber",
+	"policy",
+	"replication",
+	"requestPayment",
+	"response-cache-control",
+	"response-content-disposition",
+	"response-content-encoding",
+	"response-content-language",
+	"response-content-type",
+	"response-expires",
+	"retention",
+	"select",
+	"select-type",
+	"tagging",
+	"torrent",
+	"uploadId",
+	"uploads",
+	"versionId",
+	"versioning",
+	"versions",
+	"website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+	// Save request URL.
+	requestURL := req.URL
+	// Get encoded URL path.
+	buf.WriteString(encodeURL2Path(&req, virtualHost))
+	if requestURL.RawQuery != "" {
+		var n int
+		vals, _ := url.ParseQuery(requestURL.RawQuery)
+		// Verify if any sub resource queries are present, if yes
+		// canonicalize them.
+		for _, resource := range resourceList {
+			if vv, ok := vals[resource]; ok && len(vv) > 0 {
+				n++
+				// First element
+				switch n {
+				case 1:
+					buf.WriteByte('?')
+				// The rest
+				default:
+					buf.WriteByte('&')
+				}
+				buf.WriteString(resource)
+				// Request parameters
+				if len(vv[0]) > 0 {
+					buf.WriteByte('=')
+					buf.WriteString(vv[0])
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 00000000..ce64c37d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,331 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+	"bytes"
+	"encoding/hex"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+	signV4Algorithm   = "AWS4-HMAC-SHA256"
+	iso8601DateFormat = "20060102T150405Z"
+	yyyymmdd          = "20060102"
+)
+
+// Different service types
+const (
+	ServiceTypeS3  = "s3"
+	ServiceTypeSTS = "sts"
+)
+
+//
+// Excerpts from @lsegal -
+// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+//
+// User-Agent:
+//
+//	This is ignored from signing because signing this causes
+//	problems with generating pre-signed URLs (that are executed
+//	by other agents) or when customers pass requests through
+//	proxies, which may modify the user-agent.
+//
+// Authorization:
+//
+//	Is skipped for obvious reasons
+//
+var v4IgnoredHeaders = map[string]bool{
+	"Authorization": true,
+	"User-Agent":    true,
+}
+
+// getSigningKey hmac seed to calculate final signature.
+func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
+	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+	location := sumHMAC(date, []byte(loc))
+	service := sumHMAC(location, []byte(serviceType))
+	signingKey := sumHMAC(service, []byte("aws4_request"))
+	return signingKey
+}
+
+// getSignature final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generate a string of a specific date, an AWS region, and a
+// service.
+func getScope(location string, t time.Time, serviceType string) string {
+	scope := strings.Join([]string{
+		t.Format(yyyymmdd),
+		location,
+		serviceType,
+		"aws4_request",
+	}, "/")
+	return scope
+}
+
+// GetCredential generate a credential string.
+func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
+	scope := getScope(location, t, serviceType)
+	return accessKeyID + "/" + scope
+}
+
+// getHashedPayload get the hexadecimal value of the SHA256 hash of
+// the request payload.
+func getHashedPayload(req http.Request) string {
+	hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+	if hashedPayload == "" {
+		// Presign does not have a payload, use S3 recommended value.
+		hashedPayload = unsignedPayload
+	}
+	return hashedPayload
+}
+
+// getCanonicalHeaders generate a list of request headers for
+// signature.
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	vals := make(map[string][]string)
+	for k, vv := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // ignored header
+		}
+		headers = append(headers, strings.ToLower(k))
+		vals[strings.ToLower(k)] = vv
+	}
+	if !headerExists("host", headers) {
+		headers = append(headers, "host")
+	}
+	sort.Strings(headers)
+
+	var buf bytes.Buffer
+	// Save all the headers in canonical form <header>:<value> newline
+	// separated for each header.
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(getHostAddr(&req))
+			buf.WriteByte('\n')
+		default:
+			for idx, v := range vals[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(signV4TrimAll(v))
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	return buf.String()
+}
+
+func headerExists(key string, headers []string) bool {
+	for _, k := range headers {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e. lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	for k := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // Ignored header found continue.
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	if !headerExists("host", headers) {
+		headers = append(headers, "host")
+	}
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+//	canonicalRequest =
+//		<HTTPMethod>\n
+//		<CanonicalURI>\n
+//		<CanonicalQueryString>\n
+//		<CanonicalHeaders>\n
+//		<SignedHeaders>\n
+//		<HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
+	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		s3utils.EncodePath(req.URL.Path),
+		req.URL.RawQuery,
+		getCanonicalHeaders(req, ignoredHeaders),
+		getSignedHeaders(req, ignoredHeaders),
+		hashedPayload,
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSignV4 - generate the string to sign based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+	stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
+	stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
+	return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", signedHeaders)
+	query.Set("X-Amz-Credential", credential)
+	// Set session token if available.
+	if sessionToken != "" {
+		query.Set("X-Amz-Security-Token", sessionToken)
+	}
+	req.URL.RawQuery = query.Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// Add signature header to RawQuery.
+	req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+	return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+	// Get signing key.
+	signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+	// Calculate signature.
+	signature := getSignature(signingkey, policyBase64)
+	return signature
+}
+
+// SignV4STS - signature v4 for STS request.
+func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS)
+}
+
+// Internal function called for different service types.
+func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Set x-amz-date.
+	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+	// Set session token if available.
+	if sessionToken != "" {
+		req.Header.Set("X-Amz-Security-Token", sessionToken)
+	}
+
+	hashedPayload := getHashedPayload(req)
+	if serviceType == ServiceTypeSTS {
+		// Content sha256 header is not sent with the request
+		// but it is expected to have sha256 of payload for signature
+		// in STS service type request.
+		req.Header.Del("X-Amz-Content-Sha256")
+	}
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t, serviceType)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// If regular request, construct the final authorization header.
+	parts := []string{
+		signV4Algorithm + " Credential=" + credential,
+		"SignedHeaders=" + signedHeaders,
+		"Signature=" + signature,
+	}
+
+	// Set authorization header.
+	auth := strings.Join(parts, ", ")
+	req.Header.Set("Authorization", auth)
+
+	return &req
+}
+
+// SignV4 sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 00000000..b54fa4c7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,63 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+	"crypto/hmac"
+	"net/http"
+	"strings"
+
+	"github.com/minio/sha256-simd"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getHostAddr returns host header if available, otherwise returns host from URL
+func getHostAddr(req *http.Request) string {
+	host := req.Header.Get("host")
+	if host != "" && req.Host != host {
+		return host
+	}
+	if req.Host != "" {
+		return req.Host
+	}
+	return req.URL.Host
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+	// Compress adjacent spaces (a space is determined by
+	// unicode.IsSpace() internally here) to one space and return
+	return strings.Join(strings.Fields(input), " ")
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 00000000..b5fb9565
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sse
+
+import "encoding/xml"
+
+// ApplySSEByDefault defines the default encryption configuration, KMS or SSE. To activate
+// KMS, SSEAlgorithm needs to be set to "aws:kms".
+// MinIO currently does not support KMS.
+type ApplySSEByDefault struct {
+	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
+	SSEAlgorithm   string `xml:"SSEAlgorithm"`
+}
+
+// Rule layer encapsulates default encryption configuration
+type Rule struct {
+	Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// Configuration is the default encryption configuration structure
+type Configuration struct {
+	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// NewConfigurationSSES3 initializes a new SSE-S3 configuration
+func NewConfigurationSSES3() *Configuration {
+	return &Configuration{
+		Rules: []Rule{
+			{
+				Apply: ApplySSEByDefault{
+					SSEAlgorithm: "AES256",
+				},
+			},
+		},
+	}
+}
+
+// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
+func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
+	return &Configuration{
+		Rules: []Rule{
+			{
+				Apply: ApplySSEByDefault{
+					KmsMasterKeyID: kmsMasterKey,
+					SSEAlgorithm:   "aws:kms",
+				},
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 00000000..d7c65af5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,341 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tags
+
+import (
+	"encoding/xml"
+	"io"
+	"net/url"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error contains tag specific error.
+type Error interface {
+	error
+	Code() string
+}
+
+type errTag struct {
+	code    string
+	message string
+}
+
+// Code contains error code.
+func (err errTag) Code() string {
+	return err.code
+}
+
+// Error contains error message.
+func (err errTag) Error() string {
+	return err.message
+}
+
+var (
+	errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
+	errTooManyTags       = &errTag{"BadRequest", "Tags cannot be more than 50"}
+	errInvalidTagKey     = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
+	errInvalidTagValue   = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
+	errDuplicateTagKey   = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
+)
+
+// Tags come with limitations as per
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+const (
+	maxKeyLength      = 128
+	maxValueLength    = 256
+	maxObjectTagCount = 10
+	maxTagCount       = 50
+)
+
+func checkKey(key string) error {
+	if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") {
+		return errInvalidTagKey
+	}
+
+	return nil
+}
+
+func checkValue(value string) error {
+	if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") {
+		return errInvalidTagValue
+	}
+
+	return nil
+}
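For orientation, a sketch of what the SSE constructors above emit once serialized; the output shape follows directly from the XML tags on the types:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/sse"
)

func main() {
	// Default bucket encryption with SSE-S3 (AES256).
	cfg := sse.NewConfigurationSSES3()
	out, _ := xml.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
	// <ServerSideEncryptionConfiguration>
	//   <Rule>
	//     <ApplyServerSideEncryptionByDefault>
	//       <SSEAlgorithm>AES256</SSEAlgorithm>
	//     </ApplyServerSideEncryptionByDefault>
	//   </Rule>
	// </ServerSideEncryptionConfiguration>
}
```

+// Tag denotes key and value.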
+type Tag struct {
+	Key   string `xml:"Key"`
+	Value string `xml:"Value"`
+}
+
+func (tag Tag) String() string {
+	return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+	return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+	if err := checkKey(tag.Key); err != nil {
+		return err
+	}
+
+	return checkValue(tag.Value)
+}
+
+// MarshalXML encodes to XML data.
+func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if err := tag.Validate(); err != nil {
+		return err
+	}
+
+	type subTag Tag // to avoid recursively calling MarshalXML()
+	return e.EncodeElement(subTag(tag), start)
+}
+
+// UnmarshalXML decodes XML data to tag.
+func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type subTag Tag // to avoid recursively calling UnmarshalXML()
+	var st subTag
+	if err := d.DecodeElement(&st, &start); err != nil {
+		return err
+	}
+
+	if err := Tag(st).Validate(); err != nil {
+		return err
+	}
+
+	*tag = Tag(st)
+	return nil
+}
+
+// tagSet represents a list of unique tags.
+type tagSet struct {
+	tagMap   map[string]string
+	isObject bool
+}
+
+func (tags tagSet) String() string {
+	vals := make(url.Values)
+	for key, value := range tags.tagMap {
+		vals.Set(key, value)
+	}
+	return vals.Encode()
+}
+
+func (tags *tagSet) remove(key string) {
+	delete(tags.tagMap, key)
+}
+
+func (tags *tagSet) set(key, value string, failOnExist bool) error {
+	if failOnExist {
+		if _, found := tags.tagMap[key]; found {
+			return errDuplicateTagKey
+		}
+	}
+
+	if err := checkKey(key); err != nil {
+		return err
+	}
+
+	if err := checkValue(value); err != nil {
+		return err
+	}
+
+	if tags.isObject {
+		if len(tags.tagMap) == maxObjectTagCount {
+			return errTooManyObjectTags
+		}
+	} else if len(tags.tagMap) == maxTagCount {
+		return errTooManyTags
+	}
+
+	tags.tagMap[key] = value
+	return nil
+}
+
+func (tags tagSet) toMap() map[string]string {
+	m := make(map[string]string)
+	for key, value := range tags.tagMap {
+		m[key] = value
+	}
+	return m
+}
+
+// MarshalXML encodes to XML data.
+func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	tagList := struct {
+		Tags []Tag `xml:"Tag"`
+	}{}
+
+	for key, value := range tags.tagMap {
+		tagList.Tags = append(tagList.Tags, Tag{key, value})
+	}
+
+	return e.EncodeElement(tagList, start)
+}
+
+// UnmarshalXML decodes XML data to tag list.
+func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	tagList := struct {
+		Tags []Tag `xml:"Tag"`
+	}{}
+
+	if err := d.DecodeElement(&tagList, &start); err != nil {
+		return err
+	}
+
+	if tags.isObject {
+		if len(tagList.Tags) > maxObjectTagCount {
+			return errTooManyObjectTags
+		}
+	} else if len(tagList.Tags) > maxTagCount {
+		return errTooManyTags
+	}
+
+	m := map[string]string{}
+	for _, tag := range tagList.Tags {
+		if _, found := m[tag.Key]; found {
+			return errDuplicateTagKey
+		}
+
+		m[tag.Key] = tag.Value
+	}
+
+	tags.tagMap = m
+	return nil
+}
+
+type tagging struct {
+	XMLName xml.Name `xml:"Tagging"`
+	TagSet  *tagSet  `xml:"TagSet"`
+}
+
+// Tags is list of tags of XML request/response as per
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
+type Tags tagging
+
+func (tags Tags) String() string {
+	return tags.TagSet.String()
+}
+
+// Remove removes a tag by its key.
+func (tags *Tags) Remove(key string) {
+	tags.TagSet.remove(key)
+}
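A quick sketch of the tag-set API in use; the keys and values are arbitrary, and the validation limits (10 object tags, 128-rune keys, 256-rune values, no `&`) come from the constants above:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Build an object tag set with validation enabled.
	t, err := tags.NewTags(map[string]string{"env": "staging"}, true)
	if err != nil {
		panic(err)
	}
	if err := t.Set("team", "platform"); err != nil {
		panic(err)
	}
	// String() renders the URL-encoded form used on the wire.
	fmt.Println(t.String()) // env=staging&team=platform
}
```

+// Set sets new tag.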
+func (tags *Tags) Set(key, value string) error { + return tags.TagSet.set(key, value, false) +} + +// ToMap returns copy of tags. +func (tags Tags) ToMap() map[string]string { + return tags.TagSet.toMap() +} + +// MapToObjectTags converts an input map of key and value into +// *Tags data structure with validation. +func MapToObjectTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, true) +} + +// MapToBucketTags converts an input map of key and value into +// *Tags data structure with validation. +func MapToBucketTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, false) +} + +// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags. +func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + for key, value := range tagMap { + if err := tagging.TagSet.set(key, value, true); err != nil { + return nil, err + } + } + + return tagging, nil +} + +func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + if err := xml.NewDecoder(reader).Decode(tagging); err != nil { + return nil, err + } + + return tagging, nil +} + +// ParseBucketXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. +func ParseBucketXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, false) +} + +// ParseObjectXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax +func ParseObjectXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, true) +} + +// Parse decodes HTTP query formatted string into tags which is limited by isObject. +// A query formatted string is like "key1=value1&key2=value2". +func Parse(s string, isObject bool) (*Tags, error) { + values, err := url.ParseQuery(s) + if err != nil { + return nil, err + } + + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + for key := range values { + if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { + return nil, err + } + } + + return tagging, nil +} + +// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2". +func ParseObjectTags(s string) (*Tags, error) { + return Parse(s, true) +} diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go new file mode 100644 index 00000000..7aa96e0d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -0,0 +1,327 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+//	policyCondition {
+//	    matchType: "$eq",
+//	    key: "$Content-Type",
+//	    value: "image/png",
+//	}
+type policyCondition struct {
+	matchType string
+	condition string
+	value     string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+	// Expiration date and time of the POST policy.
+	expiration time.Time
+	// Collection of different policy conditions.
+	conditions []policyCondition
+	// ContentLengthRange minimum and maximum allowable size for the
+	// uploaded content.
+	contentLengthRange struct {
+		min int64
+		max int64
+	}
+
+	// Post form data.
+	formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+	p := &PostPolicy{}
+	p.conditions = make([]policyCondition, 0)
+	p.formData = make(map[string]string)
+	return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+	if t.IsZero() {
+		return errInvalidArgument("No expiry time set.")
+	}
+	p.expiration = t
+	return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Object name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$key",
+		value:     key,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = key
+	return nil
+}
+
+// SetKeyStartsWith - Sets an object name that a policy based upload
+// can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+	if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
+		return errInvalidArgument("Object prefix is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$key",
+		value:     keyStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = keyStartsWith
+	return nil
+}
+
+// SetBucket - Sets bucket at which objects will be uploaded to.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+		return errInvalidArgument("Bucket name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$bucket",
+		value:     bucketName,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["bucket"] = bucketName
+	return nil
+}
+
+// SetCondition - Sets condition for credentials, date and algorithm.
+func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
+	if strings.TrimSpace(value) == "" || value == "" {
+		return errInvalidArgument("No value specified for condition")
+	}
+
+	policyCond := policyCondition{
+		matchType: matchType,
+		condition: "$" + condition,
+		value:     value,
+	}
+	if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
+		if err := p.addNewPolicy(policyCond); err != nil {
+			return err
+		}
+		p.formData[condition] = value
+		return nil
+	}
+	return errInvalidArgument("Invalid condition in policy")
+}
+
+// SetContentType - Sets content-type of the object for this policy
+// based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+	if strings.TrimSpace(contentType) == "" || contentType == "" {
+		return errInvalidArgument("No content type specified.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$Content-Type",
+		value:     contentType,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Type"] = contentType
+	return nil
+}
+
+// SetContentTypeStartsWith - Sets what content-type of the object for this policy
+// based upload can start with.
+func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
+	if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" {
+		return errInvalidArgument("No content type specified.")
+	}
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$Content-Type",
+		value:     contentTypeStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Type"] = contentTypeStartsWith
+	return nil
+}
+
+// SetContentLengthRange - Set new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+	if min > max {
+		return errInvalidArgument("Minimum limit is larger than maximum limit.")
+	}
+	if min < 0 {
+		return errInvalidArgument("Minimum limit cannot be negative.")
+	}
+	if max < 0 {
+		return errInvalidArgument("Maximum limit cannot be negative.")
+	}
+	p.contentLengthRange.min = min
+	p.contentLengthRange.max = max
+	return nil
+}
+
+// SetSuccessActionRedirect - Sets the success redirect URL of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
+	if strings.TrimSpace(redirect) == "" || redirect == "" {
+		return errInvalidArgument("Redirect is empty")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$success_action_redirect",
+		value:     redirect,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["success_action_redirect"] = redirect
+	return nil
+}
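Taken together, a browser-upload policy might be assembled as below. The bucket, prefix, and expiry are placeholders, and presigning the resulting policy happens elsewhere in the client:

```go
package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go/v7"
)

func main() {
	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucket")           // hypothetical bucket
	policy.SetKeyStartsWith("uploads/")     // allow any key under uploads/
	policy.SetContentLengthRange(1, 10<<20) // 1 B .. 10 MiB
	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// The JSON document that gets base64-encoded and signed.
	fmt.Println(policy)
}
```

+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.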
+func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return errInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return errInvalidArgument("Key is empty") + } + if value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return errInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// String function for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshaled JSON in bytes. +func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr += conditionsStr + retStr += "}" + return []byte(retStr) +} + +// base64 - Produces base64 of PostPolicy's Marshaled json. +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go new file mode 100644 index 00000000..b54081d0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -0,0 +1,69 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c *Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// normalize jitter to the range [0, 1.0]
+	if jitter < NoJitter {
+		jitter = NoJitter
+	}
+	if jitter > MaxJitter {
+		jitter = MaxJitter
+	}
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// 1<<uint(attempt) below could overflow, so limit the value of attempt
+		maxAttempt := 30
+		if attempt > maxAttempt {
+			attempt = maxAttempt
+		}
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempts starts.
+			case attemptCh <- nextBackoff:
+				nextBackoff++
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 00000000..f454e675
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,125 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"net/http"
+	"time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 200 * time.Millisecond
+var DefaultRetryUnit = 200 * time.Millisecond
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+var DefaultRetryCap = time.Second
+
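The backoff schedule is easier to see with numbers. A standalone sketch of the same formula using the MinIO defaults (200 ms unit, 1 s cap, jitter omitted here):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	unit, cap := 200*time.Millisecond, time.Second
	for attempt := 0; attempt < 5; attempt++ {
		// sleep = min(cap, unit * 2^attempt), before jitter is subtracted.
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > cap {
			sleep = cap
		}
		fmt.Println(attempt, sleep) // 0 200ms, 1 400ms, 2 800ms, then capped at 1s
	}
}
```

+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.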
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
+	attemptCh := make(chan int)
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		for i := 0; i < maxRetry; i++ {
+			select {
+			case attemptCh <- i + 1:
+			case <-ctx.Done():
+				return
+			}
+
+			select {
+			case <-time.After(exponentialBackoffWait(i)):
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return attemptCh
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+	"RequestError":          {},
+	"RequestTimeout":        {},
+	"Throttling":            {},
+	"ThrottlingException":   {},
+	"RequestLimitExceeded":  {},
+	"RequestThrottled":      {},
+	"InternalError":         {},
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"SlowDown":              {},
+	// Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+	_, ok = retryableS3Codes[s3Code]
+	return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
+	http.StatusInternalServerError: {},
+	http.StatusBadGateway:          {},
+	http.StatusServiceUnavailable:  {},
+	http.StatusGatewayTimeout:      {},
+	// Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+	_, ok = retryableHTTPStatusCodes[httpStatusCode]
+	return ok
+}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 00000000..9c8f02c8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,57 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
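A rough sketch of the channel-of-attempts pattern this file uses; the request step is a stand-in, since the real client threads the timer through its internal request execution:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// attempts mirrors the shape of the vendored newRetryTimer: a channel
// that yields attempt numbers and backs off between sends until the
// context is cancelled.
func attempts(ctx context.Context, maxRetry int, wait func(int) time.Duration) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < maxRetry; i++ {
			select {
			case ch <- i + 1:
			case <-ctx.Done():
				return
			}
			select {
			case <-time.After(wait(i)):
			case <-ctx.Done():
				return
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for n := range attempts(ctx, 5, func(i int) time.Duration {
		return 100 * time.Millisecond << uint(i)
	}) {
		// A caller would issue the request here and stop on success or
		// on a non-retryable code (cf. isHTTPStatusRetryable above).
		fmt.Println("attempt", n)
	}
}
```

+// awsS3EndpointMap Amazon S3 endpoint map.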
+var awsS3EndpointMap = map[string]string{ + "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", + "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", + "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", + "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", + "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", + "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", + "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", + "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", + "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", + "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", + "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", + "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", + "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", + "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", + "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", + "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", + "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", + "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string) (s3Endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. + s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" + } + return s3Endpoint +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go new file mode 100644 index 00000000..f365157e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -0,0 +1,61 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 00000000..a88477b7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,84 @@
+//go:build go1.7 || go1.8
+// +build go1.7 go1.8
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"time"
+)
+
+// mustGetSystemCertPool - returns the system CAs, or an empty pool in case of error (or on Windows).
+func mustGetSystemCertPool() *x509.CertPool {
+	pool, err := x509.SystemCertPool()
+	if err != nil {
+		return x509.NewCertPool()
+	}
+	return pool
+}
+
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with the additional param DisableCompression
+// set to true, to avoid decompressing content with 'gzip' encoding.
+var DefaultTransport = func(secure bool) (*http.Transport, error) {
+	tr := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		MaxIdleConns:          256,
+		MaxIdleConnsPerHost:   16,
+		ResponseHeaderTimeout: time.Minute,
+		IdleConnTimeout:       time.Minute,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 10 * time.Second,
+		// Set this value so that the underlying transport round-tripper
+		// doesn't try to auto decode the body of objects with
+		// content-encoding set to `gzip`.
+		//
+		// Refer:
+		//    https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+		DisableCompression: true,
+	}
+
+	if secure {
+		tr.TLSClientConfig = &tls.Config{
+			// Can't use SSLv3 because of POODLE and BEAST
+			// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+			// Can't use TLSv1.1 because of RC4 cipher usage
+			MinVersion: tls.VersionTLS12,
+		}
+		if f := os.Getenv("SSL_CERT_FILE"); f != "" {
+			rootCAs := mustGetSystemCertPool()
+			data, err := ioutil.ReadFile(f)
+			if err == nil {
+				rootCAs.AppendCertsFromPEM(data)
+			}
+			tr.TLSClientConfig.RootCAs = rootCAs
+		}
+	}
+	return tr, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 00000000..3ebe7b29
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,623 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	md5simd "github.com/minio/md5-simd"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/sha256-simd"
+)
+
+func trimEtag(etag string) string {
+	etag = strings.TrimPrefix(etag, "\"")
+	return strings.TrimSuffix(etag, "\"")
+}
+
+var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
+
+func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
+	if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
+		expTime, err := parseRFC7231Time(matches[1])
+		if err != nil {
+			return time.Time{}, ""
+		}
+		return expTime, matches[2]
+	}
+	return time.Time{}, ""
+}
+
+var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
+
+func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
+	matches := restoreRegex.FindStringSubmatch(restore)
+	if len(matches) != 4 {
+		return false, time.Time{}, errors.New("unexpected restore header")
+	}
+	ongoing, err = strconv.ParseBool(matches[1])
+	if err != nil {
+		return false, time.Time{}, err
+	}
+	if matches[3] != "" {
+		expTime, err = parseRFC7231Time(matches[3])
+		if err != nil {
+			return false, time.Time{}, err
+		}
+	}
+	return
+}
+
+// xmlDecoder provides a decoded value from the XML body.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
+// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
+func sum256Hex(data []byte) string {
+	hash := newSHA256Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return hex.EncodeToString(hash.Sum(nil))
+}
+
+// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
+func sumMD5Base64(data []byte) string {
+	hash := newMd5Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+	// If secure is false, use 'http' scheme.
+	scheme := "https"
+	if !secure {
+		scheme = "http"
+	}
+
+	// Construct a secured endpoint URL.
+	endpointURLStr := scheme + "://" + endpoint
+	endpointURL, err := url.Parse(endpointURLStr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate incoming endpoint URL.
+	if err := isValidEndpointURL(*endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// closeResponse closes a non-nil response along with any response Body;
+// a convenient wrapper to drain any remaining data on the response body.
+//
+// Subsequently this allows the golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+	// Callers should close resp.Body when done reading from it.
+	// If resp.Body is not closed, the Client's underlying RoundTripper
+	// (typically Transport) may not be able to re-use a persistent TCP
+	// connection to the server for a subsequent "keep-alive" request.
+	if resp != nil && resp.Body != nil {
+		// Drain any remaining Body and then close the connection.
+		// Without draining first, the connection could not be
+		// re-used for future requests.
+		// - http://stackoverflow.com/a/17961593/4465767
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}
+}
+
+var (
+	// Hex encoded string of nil sha256sum bytes.
+	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+	// Sentinel URL is the default url value which is invalid.
+	sentinelURL = url.URL{}
+)
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+	if endpointURL == sentinelURL {
+		return errInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	host := endpointURL.Hostname()
+	if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+		msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
+		return errInvalidArgument(msg)
+	}
+
+	if strings.Contains(host, ".s3.amazonaws.com") {
+		if !s3utils.IsAmazonEndpoint(endpointURL) {
+			return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+		}
+	}
+	if strings.Contains(host, ".googleapis.com") {
+		if !s3utils.IsGoogleEndpoint(endpointURL) {
+			return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+		}
+	}
+	return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return errInvalidArgument("Expires cannot be lesser than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return errInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them against a list of preserved header keys.
+func extractObjMetadata(header http.Header) http.Header {
+	preserveKeys := []string{
+		"Content-Type",
+		"Cache-Control",
+		"Content-Encoding",
+		"Content-Language",
+		"Content-Disposition",
+		"X-Amz-Storage-Class",
+		"X-Amz-Object-Lock-Mode",
+		"X-Amz-Object-Lock-Retain-Until-Date",
+		"X-Amz-Object-Lock-Legal-Hold",
+		"X-Amz-Website-Redirect-Location",
+		"X-Amz-Server-Side-Encryption",
+		"X-Amz-Tagging-Count",
+		"X-Amz-Meta-",
+		// Add new headers to be preserved.
+		// if you add new headers here, please extend
+		// PutObjectOptions{} to preserve them
+		// upon upload as well.
+	}
+	filteredHeader := make(http.Header)
+	for k, v := range header {
+		var found bool
+		for _, prefix := range preserveKeys {
+			if !strings.HasPrefix(k, prefix) {
+				continue
+			}
+			found = true
+			break
+		}
+		if found {
+			filteredHeader[k] = v
+		}
+	}
+	return filteredHeader
+}
+
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
+	rfc822TimeFormat                           = "Mon, 2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+func parseTime(t string, formats ...string) (time.Time, error) {
+	for _, format := range formats {
+		tt, err := time.Parse(format, t)
+		if err == nil {
+			return tt, nil
+		}
+	}
+	return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats)
+}
+
+func parseRFC7231Time(lastModified string) (time.Time, error) {
+	return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear)
+}
+
+// ToObjectInfo converts http header values into ObjectInfo type,
+// extracts metadata and fills in all the necessary fields in ObjectInfo.
+func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) {
+	var err error
+	// Trim off the odd double quotes from ETag in the beginning and end.
+	etag := trimEtag(h.Get("ETag"))
+
+	// Parse content length if it exists.
+	var size int64 = -1
+	contentLengthStr := h.Get("Content-Length")
+	if contentLengthStr != "" {
+		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+		if err != nil {
+			// Content-Length is not valid
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// Parse Last-Modified, which has HTTP time format.
+	mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
+	if err != nil {
+		return ObjectInfo{}, ErrorResponse{
+			Code:       "InternalError",
+			Message:    fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
+			BucketName: bucketName,
+			Key:        objectName,
+			RequestID:  h.Get("x-amz-request-id"),
+			HostID:     h.Get("x-amz-id-2"),
+			Region:     h.Get("x-amz-bucket-region"),
+		}
+	}
+
+	// Fetch content type if any is present.
+	contentType := strings.TrimSpace(h.Get("Content-Type"))
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	expiryStr := h.Get("Expires")
+	var expiry time.Time
+	if expiryStr != "" {
+		expiry, err = parseRFC7231Time(expiryStr)
+		if err != nil {
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("'Expiry' is not in supported format: %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	metadata := extractObjMetadata(h)
+	userMetadata := make(map[string]string)
+	for k, v := range metadata {
+		if strings.HasPrefix(k, "X-Amz-Meta-") {
+			userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
+		}
+	}
+	userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
+
+	var tagCount int
+	if count := h.Get(amzTaggingCount); count != "" {
+		tagCount, err = strconv.Atoi(count)
+		if err != nil {
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// Nil if not found
+	var restore *RestoreInfo
+	if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
+		ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
+		if err != nil {
+			return ObjectInfo{}, err
+		}
+		restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
+	}
+
+	// extract lifecycle expiry date and rule ID
+	expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
+
+	deleteMarker := h.Get(amzDeleteMarker) == "true"
+
+	// Save object metadata info.
+	return ObjectInfo{
+		ETag:              etag,
+		Key:               objectName,
+		Size:              size,
+		LastModified:      mtime,
+		ContentType:       contentType,
+		Expires:           expiry,
+		VersionID:         h.Get(amzVersionID),
+		IsDeleteMarker:    deleteMarker,
+		ReplicationStatus: h.Get(amzReplicationStatus),
+		Expiration:        expTime,
+		ExpirationRuleID:  ruleID,
+		// Extract only the relevant header keys describing the object.
+		// The extractObjMetadata function above filters out a standard
+		// set of keys which are not part of object metadata.
+		Metadata:     metadata,
+		UserMetadata: userMetadata,
+		UserTags:     userTags,
+		UserTagCount: tagCount,
+		Restore:      restore,
+	}, nil
+}
+
+var readFull = func(r io.Reader, buf []byte) (n int, err error) {
+	// ReadFull reads exactly len(buf) bytes from r into buf.
+	// It returns the number of bytes copied and an error if
+	// fewer bytes were read. The error is EOF only if no bytes
+	// were read. If an EOF happens after reading some but not
+	// all the bytes, ReadFull returns ErrUnexpectedEOF.
+	// On return, n == len(buf) if and only if err == nil.
+	// If r returns an error having read at least len(buf) bytes,
+	// the error is dropped.
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		// Some spurious io.Readers return io.ErrUnexpectedEOF
+		// when nn == 0; this behavior is undocumented, so we
+		// deliberately avoid the stock io.ReadFull (which would
+		// require custom handling) and instead modify the
+		// original io.ReadFull implementation to avoid the issue:
+		// io.ErrUnexpectedEOF with nn == 0 really
+		// means io.EOF.
+		if err == io.ErrUnexpectedEOF && nn == 0 {
+			err = io.EOF
+		}
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+	if !strings.HasPrefix(origAuth, signV4Algorithm) {
+		// Set a temporary redacted auth
+		return "AWS **REDACTED**:**REDACTED**"
+	}
+
+	// Signature V4 authorization header.
+
+	// Strip out accessKeyID from:
+	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+	// Strip out 256-bit signature from: Signature=<256-bit signature>
+	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// getDefaultLocation returns the location based on the input URL `u`;
+// if a region override is provided then the location always defaults
+// to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+	if regionOverride != "" {
+		return regionOverride
+	}
+	region := s3utils.GetRegionFromURL(u)
+	if region == "" {
+		region = "us-east-1"
+	}
+	return region
+}
+
+var supportedHeaders = map[string]bool{
+	"content-type":                        true,
+	"cache-control":                       true,
+	"content-encoding":                    true,
+	"content-disposition":                 true,
+	"content-language":                    true,
+	"x-amz-website-redirect-location":     true,
+	"x-amz-object-lock-mode":              true,
+	"x-amz-metadata-directive":            true,
+	"x-amz-object-lock-retain-until-date": true,
+	"expires":                             true,
+	"x-amz-replication-status":            true,
+	// Add more supported headers here.
+	// Must be lower case.
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+	return strings.EqualFold(amzStorageClass, headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+	return supportedHeaders[strings.ToLower(headerKey)]
+}
+
+// sseHeaders is a list of server side encryption headers
+var sseHeaders = map[string]bool{
+	"x-amz-server-side-encryption":                    true,
+	"x-amz-server-side-encryption-aws-kms-key-id":     true,
+	"x-amz-server-side-encryption-context":            true,
+	"x-amz-server-side-encryption-customer-algorithm": true,
+	"x-amz-server-side-encryption-customer-key":       true,
+	"x-amz-server-side-encryption-customer-key-md5":   true,
+	// Add more supported headers here.
+	// Must be lower case.
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+	return sseHeaders[strings.ToLower(headerKey)]
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+
+	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
+}
+
+var (
+	md5Pool    = sync.Pool{New: func() interface{} { return md5.New() }}
+	sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
+)
+
+func newMd5Hasher() md5simd.Hasher {
+	return hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
+}
+
+func newSHA256Hasher() md5simd.Hasher {
+	return hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
+}
+
+// hashWrapper implements the md5simd.Hasher interface.
+type hashWrapper struct {
+	hash.Hash
+	isMD5    bool
+	isSHA256 bool
+}
+
+// Close will put the hasher back into the pool.
+func (m hashWrapper) Close() {
+	if m.isMD5 && m.Hash != nil {
+		m.Reset()
+		md5Pool.Put(m.Hash)
+	}
+	if m.isSHA256 && m.Hash != nil {
+		m.Reset()
+		sha256Pool.Put(m.Hash)
+	}
+	m.Hash = nil
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+	b := make([]byte, 30)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := 30-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return prefix + string(b[0:30-len(prefix)])
+}
+
+// IsNetworkOrHostDown - if there was a network error or if the host is down.
+// expectTimeouts indicates that *context* timeouts are expected and do not
+// indicate a downed host. Other timeouts still return down.
+func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
+	if err == nil {
+		return false
+	}
+
+	if errors.Is(err, context.Canceled) {
+		return false
+	}
+
+	if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
+		return false
+	}
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return true
+	}
+
+	// We need to figure out whether the error is a timeout
+	// or a non-temporary error.
+	urlErr := &url.Error{}
+	if errors.As(err, &urlErr) {
+		switch urlErr.Err.(type) {
+		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+			return true
+		}
+	}
+	var e net.Error
+	if errors.As(err, &e) {
+		if e.Timeout() {
+			return true
+		}
+	}
+
+	// Fallback to other mechanisms.
+	switch {
+	case strings.Contains(err.Error(), "Connection closed by foreign host"):
+		return true
+	case strings.Contains(err.Error(), "TLS handshake timeout"):
+		// If error is - tlsHandshakeTimeoutError.
+		return true
+	case strings.Contains(err.Error(), "i/o timeout"):
+		// If error is - tcp timeoutError.
+		return true
+	case strings.Contains(err.Error(), "connection timed out"):
+		// If err is a net.Dial timeout.
+ return true + case strings.Contains(err.Error(), "connection refused"): + // If err is connection refused + return true + + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): + // Denial errors + return true + } + return false +} diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore new file mode 100644 index 00000000..c56069fe --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/.gitignore @@ -0,0 +1 @@ +*.test \ No newline at end of file diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md
new file mode 100644
index 00000000..6117488d
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/README.md
@@ -0,0 +1,137 @@
+# sha256-simd
+
+Accelerate SHA256 computations in pure Go using AVX512 and SHA Extensions for x86, and ARM64 SHA2 instructions for ARM.
+On AVX512 it provides an up to 8x improvement (over 3 GB/s per core).
+SHA Extensions give a performance boost of close to 4x over native.
+
+## Introduction
+
+This package is designed as a replacement for `crypto/sha256`.
+For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement.
+
+This package uses Golang assembly.
+The AVX512 version is based on Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al.
+
+## Support for Intel SHA Extensions
+
+Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)).
+
+```
+$ benchcmp avx2.txt sha-ext.txt
+benchmark           AVX2 MB/s    SHA Ext MB/s    speedup
+BenchmarkHash5M     514.40       1975.17         3.84x
+```
+
+Thanks to Kristofer Peterson, we also added further performance changes such as optimized padding and endian conversions, which sped up all implementations: the Intel SHA extension path alone roughly doubled performance for small sizes, while the other changes improved everything by roughly 50%.
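+
+As a minimal sketch for exercising these code paths (this example assumes nothing beyond the package's own one-shot `Sum256` helper, defined in `sha256.go` in this package, and the default import name `sha256`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	sha256 "github.com/minio/sha256-simd"
+)
+
+func main() {
+	// Sum256 uses whichever block implementation was selected at
+	// init time (SHA Extensions, ARM64 SHA2, or the generic fallback).
+	digest := sha256.Sum256([]byte("hello, world"))
+	fmt.Printf("%x\n", digest)
+}
+```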
+
+## Support for AVX512
+
+We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU):
+
+```
+$ benchcmp avx2.txt avx512.txt
+benchmark           AVX2 MB/s    AVX512 MB/s    speedup
+BenchmarkHash5M     448.62       3498.20        7.80x
+```
+
+The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec, or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide).
+
+Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another, because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message.
+
+Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice.
+
+Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. A `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion:
+
+```go
+import "github.com/minio/sha256-simd"
+
+func main() {
+	server := sha256.NewAvx512Server()
+	h512 := sha256.NewAvx512(server)
+	h512.Write(fileBlock)
+	digest := h512.Sum([]byte{})
+}
+```
+
+Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance:
+* Have many goroutines doing SHA256 calculations in parallel.
+* Try to Write() messages in multiples of 64 bytes.
+* Try to keep the overall length of messages to a roughly similar size, i.e. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible).
+
+More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post, including scaling across cores.
+
+## Drop-In Replacement
+
+The following code snippet shows how you can use `github.com/minio/sha256-simd`.
+This will automatically select the fastest method for the architecture on which it will be executed.
+
+```go
+import "github.com/minio/sha256-simd"
+
+func main() {
+	...
+	shaWriter := sha256.New()
+	io.Copy(shaWriter, file)
+	...
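+	// Illustrative extra step (an assumption, not part of the upstream
+	// snippet): shaWriter is a hash.Hash, so once the copy completes
+	// the final digest can be read back with Sum.
+	fmt.Printf("%x\n", shaWriter.Sum(nil))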
+} +``` + +## Performance + +Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. + +| Processor | SIMD | Speed (MB/s) | +| --------------------------------- | ------- | ------------:| +| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | +| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | +| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | + +## asm2plan9s + +In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. + +## Why and benefits + +One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. + +Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. + +## ARM SHA Extensions + +The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). + + ``` + sha256h q2, q3, v9.4s + sha256h2 q3, q4, v9.4s + sha256su0 v5.4s, v6.4s + rev32 v8.16b, v8.16b + add v9.4s, v7.4s, v18.4s + mov v4.16b, v2.16b + sha256h q2, q3, v10.4s + sha256h2 q3, q4, v10.4s + sha256su0 v6.4s, v7.4s + sha256su1 v5.4s, v7.4s, v8.4s + ``` + +### Detailed benchmarks + +Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). + +``` +minio@minio-arm:$ benchcmp golang.txt arm64.txt +benchmark golang arm64 speedup +BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x +BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x +BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x +BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x +``` + +## License + +Released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go new file mode 100644 index 00000000..cd9fbf2d --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go @@ -0,0 +1,46 @@ +// Minio Cloud Storage, (C) 2021 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package sha256 + +import ( + "bytes" + "io/ioutil" + "runtime" + + "github.com/klauspost/cpuid/v2" +) + +func hasArmSha2() bool { + if cpuid.CPU.Has(cpuid.SHA2) { + return true + } + if runtime.GOARCH != "arm64" || runtime.GOOS != "linux" { + return false + } + + // Fall back to hacky cpuinfo parsing... + const procCPUInfo = "/proc/cpuinfo" + + // Feature to check for. + const sha256Feature = "sha2" + + cpuInfo, err := ioutil.ReadFile(procCPUInfo) + if err != nil { + return false + } + return bytes.Contains(cpuInfo, []byte(sha256Feature)) + +} diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go new file mode 100644 index 00000000..b137ead9 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256.go @@ -0,0 +1,399 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "hash" + "runtime" + + "github.com/klauspost/cpuid/v2" +) + +// Size - The size of a SHA256 checksum in bytes. +const Size = 32 + +// BlockSize - The blocksize of SHA256 in bytes. +const BlockSize = 64 + +const ( + chunk = BlockSize + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 +} + +// Reset digest back to default +func (d *digest) Reset() { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + d.nx = 0 + d.len = 0 +} + +type blockfuncType int + +const ( + blockfuncGeneric blockfuncType = iota + blockfuncSha blockfuncType = iota + blockfuncArm blockfuncType = iota +) + +var blockfunc blockfuncType + +func init() { + blockfunc = blockfuncGeneric + switch { + case hasSHAExtensions(): + blockfunc = blockfuncSha + case hasArmSha2(): + blockfunc = blockfuncArm + default: + blockfunc = blockfuncGeneric + } +} + +var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL) + +// hasSHAExtensions return whether the cpu supports SHA extensions. +func hasSHAExtensions() bool { + return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64" +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + if blockfunc != blockfuncGeneric { + d := new(digest) + d.Reset() + return d + } + // Fallback to the standard golang implementation + // if no features were found. 
+ return sha256.New() +} + +// Sum256 - single caller sha256 helper +func Sum256(data []byte) (result [Size]byte) { + var d digest + d.Reset() + d.Write(data) + result = d.checkSum() + return +} + +// Return size of checksum +func (d *digest) Size() int { return Size } + +// Return blocksize of checksum +func (d *digest) BlockSize() int { return BlockSize } + +// Write to digest +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Return sha256 sum in bytes +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) +} + +// Intermediate checksum function +func (d *digest) checkSum() (digest [Size]byte) { + n := d.nx + + var k [64]byte + copy(k[:], d.x[:n]) + + k[n] = 0x80 + + if n >= 56 { + block(d, k[:]) + + // clear block buffer - go compiles this to optimal 1x xorps + 4x movups + // unfortunately expressing this more succinctly results in much worse code + k[0] = 0 + k[1] = 0 + k[2] = 0 + k[3] = 0 + k[4] = 0 + k[5] = 0 + k[6] = 0 + k[7] = 0 + k[8] = 0 + k[9] = 0 + k[10] = 0 + k[11] = 0 + k[12] = 0 + k[13] = 0 + k[14] = 0 + k[15] = 0 + k[16] = 0 + k[17] = 0 + k[18] = 0 + k[19] = 0 + k[20] = 0 + k[21] = 0 + k[22] = 0 + k[23] = 0 + k[24] = 0 + k[25] = 0 + k[26] = 0 + k[27] = 0 + k[28] = 0 + k[29] = 0 + k[30] = 0 + k[31] = 0 + k[32] = 0 + k[33] = 0 + k[34] = 0 + k[35] = 0 + k[36] = 0 + k[37] = 0 + k[38] = 0 + k[39] = 0 + k[40] = 0 + k[41] = 0 + k[42] = 0 + k[43] = 0 + k[44] = 0 + k[45] = 0 + k[46] = 0 + k[47] = 0 + k[48] = 0 + k[49] = 0 + k[50] = 0 + k[51] = 0 + k[52] = 0 + k[53] = 0 + k[54] = 0 + k[55] = 0 + k[56] = 0 + k[57] = 0 + k[58] = 0 + k[59] = 0 + k[60] = 0 + k[61] = 0 + k[62] = 0 + k[63] = 0 + } + binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) + block(d, k[:]) + + { + const i = 0 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 1 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 2 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 3 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 4 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 5 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 6 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 7 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + + return +} + +func block(dig *digest, p []byte) { + if blockfunc == blockfuncSha { + blockShaGo(dig, p) + } else if blockfunc == blockfuncArm { + blockArmGo(dig, p) + } else if blockfunc == blockfuncGeneric { + blockGeneric(dig, p) + } +} + +func blockGeneric(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. 
+ for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm new file mode 100644 index 00000000..c959b1aa --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm @@ -0,0 +1,686 @@ + +// 16x Parallel implementation of SHA256 for AVX512 + +// +// Minio Cloud Storage, (C) 2017 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +// This code is based on the Intel Multi-Buffer Crypto for IPSec library +// and more specifically the following implementation: +// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm +// +// For Golang it has been converted into Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble the AVX512 instructions +// + +// Copyright (c) 2017, Intel Corporation +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Intel Corporation nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#define SHA256_DIGEST_ROW_SIZE 64 + +// arg1 +#define STATE rdi +#define STATE_P9 DI +// arg2 +#define INP_SIZE rsi +#define INP_SIZE_P9 SI + +#define IDX rcx +#define TBL rdx +#define TBL_P9 DX + +#define INPUT rax +#define INPUT_P9 AX + +#define inp0 r9 +#define SCRATCH_P9 R12 +#define SCRATCH r12 +#define maskp r13 +#define MASKP_P9 R13 +#define mask r14 +#define MASK_P9 R14 + +#define A zmm0 +#define B zmm1 +#define C zmm2 +#define D zmm3 +#define E zmm4 +#define F zmm5 +#define G zmm6 +#define H zmm7 +#define T1 zmm8 +#define TMP0 zmm9 +#define TMP1 zmm10 +#define TMP2 zmm11 +#define TMP3 zmm12 +#define TMP4 zmm13 +#define TMP5 zmm14 +#define TMP6 zmm15 + +#define W0 zmm16 +#define W1 zmm17 +#define W2 zmm18 +#define W3 zmm19 +#define W4 zmm20 +#define W5 zmm21 +#define W6 zmm22 +#define W7 zmm23 +#define W8 zmm24 +#define W9 zmm25 +#define W10 zmm26 +#define W11 zmm27 +#define W12 zmm28 +#define W13 zmm29 +#define W14 zmm30 +#define W15 zmm31 + + +#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ + \ + \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} + \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} + \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} + \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} + \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} + \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} + \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} + \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} + \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} + \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} + \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} + \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} + \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} + \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} + \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} + \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} + \ + \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + \ + \ // process top half + vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} + vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} + vshufps _t1, _r2, _r3, 
0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} + vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} + \ + vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} + vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} + vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3} + vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} + \ + \ // use r2 in place of t0 + vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} + vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} + vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} + vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} + \ + vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} + vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} + vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} + vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} + \ + \ // use r6 in place of t0 + vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} + vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} + vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} + vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} + \ + vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} + vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} + vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} + vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} + \ + \ // use r10 in place of t0 + vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} + vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} + vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} + vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} + \ + vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} + vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} + vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} + vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} + \ + \ // At this point, the registers that contain interesting data are: + \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 + \ // Can use t1 and r14 as scratch registers + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ + \ + vmovdqu32 _r14, [rbx] \ + vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} + vmovdqu32 _t1, [r8] \ + vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vmovdqu32 _r2, 
+	vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1}
+	vmovdqu32 _t0, [r8] \
+	vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5}
+	\
+	vmovdqu32 _r3, [rbx] \
+	vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2}
+	vmovdqu32 _r7, [r8] \
+	vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6}
+	\
+	vmovdqu32 _r1, [rbx] \
+	vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3}
+	vmovdqu32 _r5, [r8] \
+	vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7}
+	\
+	vmovdqu32 _r0, [rbx] \
+	vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0}
+	vmovdqu32 _r4, [r8] \
+	vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4}
+	\
+	vmovdqu32 _r6, [rbx] \
+	vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1}
+	vmovdqu32 _r10, [r8] \
+	vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5}
+	\
+	vmovdqu32 _r11, [rbx] \
+	vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2}
+	vmovdqu32 _r15, [r8] \
+	vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6}
+	\
+	vmovdqu32 _r9, [rbx] \
+	vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3}
+	vmovdqu32 _r13, [r8] \
+	vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7}
+	\
+	\ // At this point r8 and r12 can be used as scratch registers
+	vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
+	vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
+	\
+	vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
+	vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
+	\
+	vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
+	vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
+	\
+	vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
+	vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
+	\
+	vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
+	vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
+	\
+	vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
+	vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
+	\
+	vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
+	vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
+	\
+	vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
+	vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
+	\
+	vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
+	vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
+
+
+// CH(A, B, C) = (A&B) ^ (~A&C)
+// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
+// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22
+// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25
+// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3
+// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10
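For reference, these are the standard FIPS 180-4 SHA-256 round primitives. A minimal scalar Go sketch of the same functions, plus one round step and one message-schedule step as the PROCESS_LOOP and MSG_SCHED_ROUND_16_63 macros below compute them 16 lanes at a time (function names here are illustrative, not part of the package):

package sha256ref // illustrative scalar reference, not part of the vendored package

import "math/bits"

// The six round primitives listed in the comments above
// (ROR_n = rotate right by n bits, SHR_n = shift right by n bits).
func ch(e, f, g uint32) uint32  { return (e & f) ^ (^e & g) }
func maj(a, b, c uint32) uint32 { return (a & b) ^ (a & c) ^ (b & c) }

func sigmaBig0(a uint32) uint32 {
	return bits.RotateLeft32(a, -2) ^ bits.RotateLeft32(a, -13) ^ bits.RotateLeft32(a, -22)
}

func sigmaBig1(e uint32) uint32 {
	return bits.RotateLeft32(e, -6) ^ bits.RotateLeft32(e, -11) ^ bits.RotateLeft32(e, -25)
}

func sigma0(w uint32) uint32 { return bits.RotateLeft32(w, -7) ^ bits.RotateLeft32(w, -18) ^ (w >> 3) }
func sigma1(w uint32) uint32 { return bits.RotateLeft32(w, -17) ^ bits.RotateLeft32(w, -19) ^ (w >> 10) }

// round computes T1 = H + SIGMA1(E) + CH(E,F,G) + Kt + Wt and
// T2 = SIGMA0(A) + MAJ(A,B,C), then rotates the working variables
// (E picks up D+T1, A picks up T1+T2), exactly what PROCESS_LOOP
// below does on a full ZMM register of 16 lanes.
func round(a, b, c, d, e, f, g, h, kt, wt uint32) (uint32, uint32, uint32, uint32, uint32, uint32, uint32, uint32) {
	t1 := h + sigmaBig1(e) + ch(e, f, g) + kt + wt
	t2 := sigmaBig0(a) + maj(a, b, c)
	return t1 + t2, a, b, c, d + t1, e, f, g
}

// schedule extends the message: Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + sigma0(Wt-15),
// matching MSG_SCHED_ROUND_16_63 below.
func schedule(wtm16, wtm15, wtm7, wtm2 uint32) uint32 {
	return wtm16 + sigma1(wtm2) + wtm7 + sigma0(wtm15)
}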
+
+// Main processing loop per round
+#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \
+	\ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
+	\ // T2 = SIGMA0(A) + MAJ(A, B, C)
+	\ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
+	\
+	\ // H becomes T2, then add T1 for A
+	\ // D becomes D + T1 for E
+	\
+	vpaddd T1, _H, TMP3 \ // T1 = H + Kt
+	vmovdqu32 TMP0, _E \
+	vprord TMP1, _E, 6 \ // ROR_6(E)
+	vprord TMP2, _E, 11 \ // ROR_11(E)
+	vprord TMP3, _E, 25 \ // ROR_25(E)
+	vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G)
+	vpaddd T1, T1, _WT \ // T1 = T1 + Wt
+	vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E)
+	vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G)
+	vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E)
+	vpaddd _D, _D, T1 \ // D = D + T1
+	\
+	vprord _H, _A, 2 \ // ROR_2(A)
+	vprord TMP2, _A, 13 \ // ROR_13(A)
+	vprord TMP3, _A, 22 \ // ROR_22(A)
+	vmovdqu32 TMP0, _A \
+	vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C)
+	vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A)
+	vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C)
+	vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1
+	\
+	vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt
+
+
+#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \
+	vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2)
+	vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2)
+	vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2)
+	vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2)
+	\
+	vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2)
+	vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7
+	\
+	vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15)
+	vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15)
+	vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15)
+	vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15)
+	\
+	vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) +
+	\ //      Wt-7 + sigma0(Wt-15)
+
+
+
+// Note this is reading in a block of data for one lane
+// When all 16 are read, the data must be transposed to build msg schedule
+#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \
+	TESTQ $(1<<OFFSET), MASK_P9 \
+	JE LABEL \
+	MOVQ OFFSET*24(INPUT_P9), R9 \
+	vmovups _WT, [inp0+IDX] \
+LABEL:
+
+#define MASKED_LOAD(_WT, OFFSET, LABEL) \
+	TESTQ $(1<<OFFSET), MASK_P9 \
+	JE LABEL \
+	MOVQ OFFSET*24(INPUT_P9), R9 \
+	vmovups _WT, [inp0+IDX] \
+LABEL:
+
+TEXT ·sha256X16Avx512(SB), 7, $0
+	MOVQ digests+0(FP), STATE_P9
+	MOVQ scratch+8(FP), SCRATCH_P9
+	MOVQ mask_len+32(FP), INP_SIZE_P9 // number of blocks to process
+	MOVQ mask_base+24(FP), MASKP_P9
+	MOVQ (MASKP_P9), MASK_P9
+	kmovq k1, mask
+	LEAQ inputs+48(FP), INPUT_P9
+
+	// Initialize digests
+	vmovdqu32 A, [STATE + 0*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 B, [STATE + 1*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 C, [STATE + 2*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 D, [STATE + 3*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 E, [STATE + 4*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 F, [STATE + 5*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 G, [STATE + 6*SHA256_DIGEST_ROW_SIZE]
+	vmovdqu32 H, [STATE + 7*SHA256_DIGEST_ROW_SIZE]
+
+	MOVQ table+16(FP), TBL_P9
+
+	xor IDX, IDX
+
+	// Read in first block of input data per active lane
+	MASKED_LOAD( W0, 0, skipInput0)
+	MASKED_LOAD( W1, 1, skipInput1)
+	MASKED_LOAD( W2, 2, skipInput2)
+	MASKED_LOAD( W3, 3, skipInput3)
+	MASKED_LOAD( W4, 4, skipInput4)
+	MASKED_LOAD( W5, 5, skipInput5)
+	MASKED_LOAD( W6, 6, skipInput6)
+	MASKED_LOAD( W7, 7, skipInput7)
+	MASKED_LOAD( W8, 8, skipInput8)
+	MASKED_LOAD( W9, 9, skipInput9)
+	MASKED_LOAD(W10, 10, skipInput10)
+	MASKED_LOAD(W11, 11, skipInput11)
+	MASKED_LOAD(W12, 12, skipInput12)
+	MASKED_LOAD(W13, 13, skipInput13)
+	MASKED_LOAD(W14, 14, skipInput14)
+	MASKED_LOAD(W15, 15, skipInput15)
+
+lloop:
+	LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), TBL_P9
+	vmovdqu32 TMP2, [TBL]
+
+	// Get first K from table
+	MOVQ table+16(FP), TBL_P9
+	vmovdqu32 TMP3, [TBL]
+
+	// Save digests for later addition
+	vmovdqu32 [SCRATCH + 64*0], A
+	vmovdqu32 [SCRATCH + 64*1], B
+	vmovdqu32 [SCRATCH + 64*2], C
+	vmovdqu32 [SCRATCH + 64*3], D
+	vmovdqu32 [SCRATCH + 64*4], E
+	vmovdqu32 [SCRATCH + 64*5], F
+	vmovdqu32 [SCRATCH + 64*6], G
+	vmovdqu32 [SCRATCH + 64*7], H
+
+	add IDX, 64
+
+	// Transpose input data
+	TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1)
+
+	vpshufb W0, W0, TMP2
+	vpshufb W1, W1, TMP2
+	vpshufb W2, W2, TMP2
+	vpshufb W3, W3, TMP2
+	vpshufb W4, W4, TMP2
+	vpshufb W5, W5, TMP2
+	vpshufb W6, W6, TMP2
+	vpshufb W7, W7, TMP2
+	vpshufb W8, W8, TMP2
+	vpshufb W9, W9, TMP2
+	vpshufb W10, W10, TMP2
+	vpshufb W11, W11, TMP2
+	vpshufb W12, W12, TMP2
+	vpshufb W13, W13, TMP2
+	vpshufb W14, W14, TMP2
+	vpshufb W15, W15, TMP2
+
+	// MSG Schedule for W0-W15 is now complete in registers
+	// Process first 48 rounds
+	// Calculate next Wt+16 after processing is complete and Wt is unneeded
+
+	PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
+	PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
+	PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
+	PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
+	PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
+	PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
+	PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
+	PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
+	PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
+	PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
+	PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
+	PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
+	PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
+	PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
+	PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
+	PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
+	PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
+	PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
+	PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
+	PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
+	PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
+	PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
+	PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
+	PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
+	PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
+	PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
+	PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
+	PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
+	PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
+	PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
+	PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
+	PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
+	PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
+	PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
+	PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
+	PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
+	PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
+	PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
+	PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
+	PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
+	PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
+	PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
+	PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
+	PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
+	PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
+	PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
+	PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
+	PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
+
+	// Check if this is the last block
+	sub INP_SIZE, 1
+	JE lastLoop
+
+	// Load next mask for inputs
+	ADDQ $8, MASKP_P9
+	MOVQ (MASKP_P9), MASK_P9
+
+	// Process last 16 rounds
+	// Read in next block msg data for use in first 16 words of msg sched
+
+	PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_00_15( W0, 0, skipNext0)
+	PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_00_15( W1, 1, skipNext1)
+	PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_00_15( W2, 2, skipNext2)
+	PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_00_15( W3, 3, skipNext3)
+	PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_00_15( W4, 4, skipNext4)
+	PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_00_15( W5, 5, skipNext5)
+	PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_00_15( W6, 6, skipNext6)
+	PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_00_15( W7, 7, skipNext7)
+	PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
+	MSG_SCHED_ROUND_00_15( W8, 8, skipNext8)
+	PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
+	MSG_SCHED_ROUND_00_15( W9, 9, skipNext9)
+	PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
+	MSG_SCHED_ROUND_00_15(W10, 10, skipNext10)
+	PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
+	MSG_SCHED_ROUND_00_15(W11, 11, skipNext11)
+	PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
+	MSG_SCHED_ROUND_00_15(W12, 12, skipNext12)
+	PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
+	MSG_SCHED_ROUND_00_15(W13, 13, skipNext13)
+	PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
+	MSG_SCHED_ROUND_00_15(W14, 14, skipNext14)
+	PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
+	MSG_SCHED_ROUND_00_15(W15, 15, skipNext15)
+
+	// Add old digest
+	vmovdqu32 TMP2, A
+	vmovdqu32 A, [SCRATCH + 64*0]
+	vpaddd A{k1}, A, TMP2
+	vmovdqu32 TMP2, B
+	vmovdqu32 B, [SCRATCH + 64*1]
+	vpaddd B{k1}, B, TMP2
+	vmovdqu32 TMP2, C
+	vmovdqu32 C, [SCRATCH + 64*2]
+	vpaddd C{k1}, C, TMP2
+	vmovdqu32 TMP2, D
+	vmovdqu32 D, [SCRATCH + 64*3]
+	vpaddd D{k1}, D, TMP2
+	vmovdqu32 TMP2, E
+	vmovdqu32 E, [SCRATCH + 64*4]
+	vpaddd E{k1}, E, TMP2
+	vmovdqu32 TMP2, F
+	vmovdqu32 F, [SCRATCH + 64*5]
+	vpaddd F{k1}, F, TMP2
+	vmovdqu32 TMP2, G
+	vmovdqu32 G, [SCRATCH + 64*6]
+	vpaddd G{k1}, G, TMP2
+	vmovdqu32 TMP2, H
+	vmovdqu32 H, [SCRATCH + 64*7]
+	vpaddd H{k1}, H, TMP2
+
+	kmovq k1, mask
+	JMP lloop
+
+lastLoop:
+	// Process last 16 rounds
+	PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
+	PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
+	PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
+	PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
+	PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
+	PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
+	PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
+	PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
+	PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
+	PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
+	PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
+	PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
+	PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
+	PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
+	PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
+	PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
+
+	// Add old digest
+	vmovdqu32 TMP2, A
+	vmovdqu32 A, [SCRATCH + 64*0]
+	vpaddd A{k1}, A, TMP2
+	vmovdqu32 TMP2, B
+	vmovdqu32 B, [SCRATCH + 64*1]
+	vpaddd B{k1}, B, TMP2
+	vmovdqu32 TMP2, C
+	vmovdqu32 C, [SCRATCH + 64*2]
+	vpaddd C{k1}, C, TMP2
+	vmovdqu32 TMP2, D
+	vmovdqu32 D, [SCRATCH + 64*3]
+	vpaddd D{k1}, D, TMP2
+	vmovdqu32 TMP2, E
+	vmovdqu32 E, [SCRATCH + 64*4]
+	vpaddd E{k1}, E, TMP2
+	vmovdqu32 TMP2, F
+	vmovdqu32 F, [SCRATCH + 64*5]
+	vpaddd F{k1}, F, TMP2
+	vmovdqu32 TMP2, G
+	vmovdqu32 G, [SCRATCH + 64*6]
+	vpaddd G{k1}, G, TMP2
+	vmovdqu32 TMP2, H
+	vmovdqu32 H, [SCRATCH + 64*7]
+	vpaddd H{k1}, H, TMP2
+
+	// Write out digest
+	vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A
+	vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B
+	vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C
+	vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D
+	vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E
+	vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F
+	vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G
+	vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H
+
+	VZEROUPPER
+	RET
+
+//
+// Tables
+//
+
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
+DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
+GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64
+
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
+GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64
+
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
+DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
+GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
new file mode 100644
index 00000000..b7d7c163
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
@@ -0,0 +1,500 @@
+//+build !noasm,!appengine,gc
+
+/*
+ * Minio Cloud Storage, (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha256
+
+import (
+	"encoding/binary"
+	"errors"
+	"hash"
+	"sort"
+	"sync/atomic"
+	"time"
+)
+
+//go:noescape
+func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte)
+
+// Avx512ServerUID - Do not start at 0 but at the next multiple of 16 so as to be able to
+// differentiate from the default initialization value of 0
+const Avx512ServerUID = 16
+
+var uidCounter uint64
+
+// NewAvx512 - initialize sha256 Avx512 implementation.
+func NewAvx512(a512srv *Avx512Server) hash.Hash {
+	uid := atomic.AddUint64(&uidCounter, 1)
+	return &Avx512Digest{uid: uid, a512srv: a512srv}
+}
+
+// Avx512Digest - Type for computing SHA256 using Avx512
+type Avx512Digest struct {
+	uid     uint64
+	a512srv *Avx512Server
+	x       [chunk]byte
+	nx      int
+	len     uint64
+	final   bool
+	result  [Size]byte
+}
+
+// Size - Return size of checksum
+func (d *Avx512Digest) Size() int { return Size }
+
+// BlockSize - Return blocksize of checksum
+func (d Avx512Digest) BlockSize() int { return BlockSize }
+
+// Reset - reset sha digest to its initial values
+func (d *Avx512Digest) Reset() {
+	d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true}
+	d.nx = 0
+	d.len = 0
+	d.final = false
+}
+
+// Write to digest
+func (d *Avx512Digest) Write(p []byte) (nn int, err error) {
+
+	if d.final {
+		return 0, errors.New("Avx512Digest already finalized. Reset first before writing again")
+	}
+
+	nn = len(p)
+	d.len += uint64(nn)
+	if d.nx > 0 {
+		n := copy(d.x[d.nx:], p)
+		d.nx += n
+		if d.nx == chunk {
+			d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]}
+			d.nx = 0
+		}
+		p = p[n:]
+	}
+	if len(p) >= chunk {
+		n := len(p) &^ (chunk - 1)
+		d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]}
+		p = p[n:]
+	}
+	if len(p) > 0 {
+		d.nx = copy(d.x[:], p)
+	}
+	return
+}
+
+// Sum - Return sha256 sum in bytes
+func (d *Avx512Digest) Sum(in []byte) (result []byte) {
+
+	if d.final {
+		return append(in, d.result[:]...)
+	}
+
+	trail := make([]byte, 0, 128)
+	trail = append(trail, d.x[:d.nx]...)
+
+	len := d.len
+	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+	var tmp [64]byte
+	tmp[0] = 0x80
+	if len%64 < 56 {
+		trail = append(trail, tmp[0:56-len%64]...)
+	} else {
+		trail = append(trail, tmp[0:64+56-len%64]...)
+	}
+	d.nx = 0
+
+	// Length in bits.
+	len <<= 3
+	for i := uint(0); i < 8; i++ {
+		tmp[i] = byte(len >> (56 - 8*i))
+	}
+	trail = append(trail, tmp[0:8]...)
+
+	sumCh := make(chan [Size]byte)
+	d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh}
+	d.result = <-sumCh
+	d.final = true
+	return append(in, d.result[:]...)
+}
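The padding arithmetic in Sum is the standard SHA-256 trailer. A small self-contained sketch of the same computation (trailerFor is a hypothetical helper, shown only to make the two branches above concrete):

package sha256ref // illustrative, not part of the vendored package

import "encoding/binary"

// trailerFor computes the final padding appended for an n-byte message:
// 0x80, zeros until the running length is 56 mod 64, then the message
// length in bits as a big-endian uint64. For n%64 < 56 the trailer is
// 64-n%64 bytes long; otherwise it spills into a second block (128-n%64 bytes),
// mirroring the two branches in Sum above.
func trailerFor(n uint64) []byte {
	var tmp [64]byte
	tmp[0] = 0x80
	var trail []byte
	if n%64 < 56 {
		trail = append(trail, tmp[0:56-n%64]...)
	} else {
		trail = append(trail, tmp[0:64+56-n%64]...)
	}
	var length [8]byte
	binary.BigEndian.PutUint64(length[:], n<<3) // length in bits
	return append(trail, length[:]...)
}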
+
+var table = [512]uint64{
+	0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
+	0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
+	0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
+	0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
+	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
+	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
+	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
+	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
+	0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
+	0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
+	0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
+	0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
+	0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
+	0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
+	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
+	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
+	0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
+	0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
+	0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
+	0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
+	0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
+	0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
+	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
+	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
+	0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
+	0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
+	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
+	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
+	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
+	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
+	0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
+	0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
+	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
+	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
+	0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
+	0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
+	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
+	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
+	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
+	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
+	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
+	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
+	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
+	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
+	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
+	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
+	0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
+	0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
+	0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
+	0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
+	0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
+	0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
+	0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
+	0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
+	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
+	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
+	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
+	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
+	0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
+	0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
+	0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
+	0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
+	0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
+	0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
+	0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
+	0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
+	0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
+	0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
+	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
+	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
+	0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
+	0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
+	0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
+	0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
+	0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
+	0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
+	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
+	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
+	0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
+	0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
+	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
+	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
+	0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
+	0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
+	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
+	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
+	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
+	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
+	0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
+	0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
+	0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
+	0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
+	0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
+	0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
+	0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
+	0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
+	0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
+	0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
+	0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
+	0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
+	0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
+	0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
+	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
+	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
+	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
+	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
+	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
+	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
+	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
+	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
+	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
+	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
+	0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
+	0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
+	0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
+	0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
+	0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
+	0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
+	0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
+	0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
+	0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
+	0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
+	0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
+	0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
+	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
+	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
+	0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2,
+	0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2}
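Each 64-byte row of this table holds a single round constant: K[i] is doubled into a uint64 and repeated eight times, so the vmovdqu32 [TBL + round*64] load in PROCESS_LOOP broadcasts the same 32-bit Kt to all 16 lanes. A sketch of how such a table could be generated from the scalar constants (buildTable is a hypothetical helper; the vendored file simply hard-codes the result):

package sha256ref // illustrative, not part of the vendored package

// buildTable expands the 64 scalar SHA-256 round constants into the
// 16-lane row layout used above: each 32-bit K is stored twice per
// uint64 and eight times per 64-byte row.
func buildTable(k [64]uint32) (t [512]uint64) {
	for i, kv := range k {
		doubled := uint64(kv)<<32 | uint64(kv)
		for j := 0; j < 8; j++ {
			t[i*8+j] = doubled
		}
	}
	return t
}

+
+// Interface function to assembly code
+func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte {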
+
+	scratch := [512]byte{}
+	sha256X16Avx512(digests, &scratch, &table, mask, input)
+
+	output := [16][Size]byte{}
+	for i := 0; i < 16; i++ {
+		output[i] = getDigest(i, digests[:])
+	}
+
+	return output
+}
+
+func getDigest(index int, state []byte) (sum [Size]byte) {
+	for j := 0; j < 16; j += 2 {
+		for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size {
+			binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4]))
+		}
+	}
+	return
+}
+
+// Message to send across input channel
+type blockInput struct {
+	uid   uint64
+	msg   []byte
+	reset bool
+	final bool
+	sumCh chan [Size]byte
+}
+
+// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations
+type Avx512Server struct {
+	blocksCh chan blockInput       // Input channel
+	totalIn  int                   // Total number of inputs waiting to be processed
+	lanes    [16]Avx512LaneInfo    // Array with info per lane (out of 16)
+	digests  map[uint64][Size]byte // Map of uids to (interim) digest results
+}
+
+// Avx512LaneInfo - Info for each lane
+type Avx512LaneInfo struct {
+	uid      uint64          // unique identification for this SHA processing
+	block    []byte          // input block to be processed
+	outputCh chan [Size]byte // channel for output result
+}
+
+// NewAvx512Server - Create new object for parallel processing handling
+func NewAvx512Server() *Avx512Server {
+	a512srv := &Avx512Server{}
+	a512srv.digests = make(map[uint64][Size]byte)
+	a512srv.blocksCh = make(chan blockInput)
+
+	// Start a single thread for reading from the input channel
+	go a512srv.Process()
+	return a512srv
+}
+
+// Process - Sole handler for reading from the input channel
+func (a512srv *Avx512Server) Process() {
+	for {
+		select {
+		case block := <-a512srv.blocksCh:
+			if block.reset {
+				a512srv.reset(block.uid)
+				continue
+			}
+			index := block.uid & 0xf
+			// fmt.Println("Adding message:", block.uid, index)
+
+			if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs
+				//fmt.Println("Invoking Blocks()")
+				a512srv.blocks()
+			}
+			a512srv.totalIn++
+			a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg}
+			if block.final {
+				a512srv.lanes[index].outputCh = block.sumCh
+			}
+			if a512srv.totalIn == len(a512srv.lanes) {
+				// fmt.Println("Invoking Blocks() while FULL: ")
+				a512srv.blocks()
+			}
+
+		// TODO: test with larger timeout
+		case <-time.After(1 * time.Microsecond):
+			for _, lane := range a512srv.lanes {
+				if lane.block != nil { // check if there is any input to process
+					// fmt.Println("Invoking Blocks() on TIMEOUT: ")
+					a512srv.blocks()
+					break // we are done
+				}
+			}
+		}
+	}
+}
+
+// Do a reset for this calculation
+func (a512srv *Avx512Server) reset(uid uint64) {
+
+	// Check if there is a message still waiting to be processed (and remove if so)
+	for i, lane := range a512srv.lanes {
+		if lane.uid == uid {
+			if lane.block != nil {
+				a512srv.lanes[i] = Avx512LaneInfo{} // clear message
+				a512srv.totalIn--
+			}
+		}
+	}
+
+	// Delete entry from hash map
+	delete(a512srv.digests, uid)
+}
+
+// Invoke assembly and send results back
+func (a512srv *Avx512Server) blocks() {
+
+	inputs := [16][]byte{}
+	for i := range inputs {
+		inputs[i] = a512srv.lanes[i].block
+	}
+
+	mask := expandMask(genMask(inputs))
+	outputs := blockAvx512(a512srv.getDigests(), inputs, mask)
+
+	a512srv.totalIn = 0
+	for i := 0; i < len(outputs); i++ {
+		uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh
+		a512srv.digests[uid] = outputs[i]
+		a512srv.lanes[i] = Avx512LaneInfo{}
+
+		if outputCh != nil {
+			// Send back result
+			outputCh <- outputs[i]
+			delete(a512srv.digests, uid) // Delete entry from hashmap
+		}
+	}
+}
+
+func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) {
+	a512srv.blocksCh <- blockInput{uid: uid, msg: p}
+	return len(p), nil
+}
+
+// Sum - return sha256 sum in bytes for a given sum id.
+func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte {
+	sumCh := make(chan [32]byte)
+	a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh}
+	return <-sumCh
+}
+
+func (a512srv *Avx512Server) getDigests() *[512]byte {
+	digests := [512]byte{}
+	for i, lane := range a512srv.lanes {
+		a, ok := a512srv.digests[lane.uid]
+		if ok {
+			binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4]))
+			binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8]))
+			binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12]))
+			binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16]))
+			binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20]))
+			binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24]))
+			binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28]))
+			binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32]))
+		} else {
+			binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0)
+			binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1)
+			binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2)
+			binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3)
+			binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4)
+			binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5)
+			binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6)
+			binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7)
+		}
+	}
+	return &digests
+}
+
+// Helper struct for sorting blocks based on length
+type lane struct {
+	len uint
+	pos uint
+}
+
+type lanes []lane
+
+func (lns lanes) Len() int           { return len(lns) }
+func (lns lanes) Swap(i, j int)      { lns[i], lns[j] = lns[j], lns[i] }
+func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len }
+
+// Helper struct for pairing a lane mask with the number of 64-byte rounds it remains valid for
+type maskRounds struct {
+	mask   uint64
+	rounds uint64
+}
+
+func genMask(input [16][]byte) [16]maskRounds {
+
+	// Sort on blocks length small to large
+	var sorted [16]lane
+	for c, inpt := range input {
+		sorted[c] = lane{uint(len(inpt)), uint(c)}
+	}
+	sort.Sort(lanes(sorted[:]))
+
+	// Create mask array including 'rounds' between masks
+	m, round, index := uint64(0xffff), uint64(0), 0
+	var mr [16]maskRounds
+	for _, s := range sorted {
+		if s.len > 0 {
+			if uint64(s.len)>>6 > round {
+				mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round}
+				index++
+			}
+			round = uint64(s.len) >> 6
+		}
+		m = m & ^(1 << uint(s.pos))
+	}
+
+	return mr
+}
+
+// TODO: remove function
+func expandMask(mr [16]maskRounds) []uint64 {
+	size := uint64(0)
+	for _, r := range mr {
+		size += r.rounds
+	}
+	result, index := make([]uint64, size), 0
+	for _, r := range mr {
+		for j := uint64(0); j < r.rounds; j++ {
+			result[index] = r.mask
+			index++
+		}
+	}
+	return result
+}
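Taken together, the file exposes two public entry points: NewAvx512Server, which owns the 16 lanes and the scheduling loop, and NewAvx512, which returns a hash.Hash bound to one lane uid. A minimal usage sketch, assuming the package is imported from github.com/minio/sha256-simd and that the caller has already verified AVX-512 support (the kernel will fault on CPUs without it):

package main

import (
	"fmt"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	// One server multiplexes up to 16 in-flight digests onto the
	// 16-lane AVX-512 kernel; throughput comes from hashing many
	// streams concurrently, not from a single stream.
	server := sha256.NewAvx512Server()

	h := sha256.NewAvx512(server)
	h.Write([]byte("The quick brown fox jumps over the lazy dog"))
	fmt.Printf("%x\n", h.Sum(nil))
}

diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
new file mode 100644
index 00000000..275bcacb
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
@@ -0,0 +1,267 @@
+//+build !noasm,!appengine
+
+TEXT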
·sha256X16Avx512(SB), 7, $0 + MOVQ digests+0(FP), DI + MOVQ scratch+8(FP), R12 + MOVQ mask_len+32(FP), SI + MOVQ mask_base+24(FP), R13 + MOVQ (R13), R14 + LONG $0x92fbc1c4; BYTE $0xce + LEAQ inputs+48(FP), AX + QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 + MOVQ table+16(FP), DX + WORD $0x3148; BYTE $0xc9 + TESTQ $(1<<0), R14 + JE skipInput0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipInput0: + TESTQ $(1<<1), R14 + JE skipInput1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipInput1: + TESTQ $(1<<2), R14 + JE skipInput2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipInput2: + TESTQ $(1<<3), R14 + JE skipInput3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipInput3: + TESTQ $(1<<4), R14 + JE skipInput4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipInput4: + TESTQ $(1<<5), R14 + JE skipInput5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipInput5: + TESTQ $(1<<6), R14 + JE skipInput6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipInput6: + TESTQ $(1<<7), R14 + JE skipInput7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipInput7: + TESTQ $(1<<8), R14 + JE skipInput8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipInput8: + TESTQ $(1<<9), R14 + JE skipInput9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipInput9: + TESTQ $(1<<10), R14 + JE skipInput10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipInput10: + TESTQ $(1<<11), R14 + JE skipInput11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipInput11: + TESTQ $(1<<12), R14 + JE skipInput12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipInput12: + TESTQ $(1<<13), R14 + JE skipInput13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipInput13: + TESTQ $(1<<14), R14 + JE skipInput14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipInput14: + TESTQ $(1<<15), R14 + JE skipInput15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipInput15: +lloop: + LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX + LONG $0x487e7162; WORD $0x1a6f + MOVQ table+16(FP), DX + QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 + QUAD $0x2262336f487e6162; 
QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD 
$0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD 
$0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD 
$0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD 
$0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD 
$0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD 
$0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD 
$0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD 
$0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD 
$0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 + JE lastLoop + ADDQ $8, R13 + MOVQ (R13), R14 + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 + TESTQ $(1<<0), R14 + JE skipNext0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipNext0: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 + TESTQ $(1<<1), R14 + JE skipNext1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipNext1: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 + TESTQ $(1<<2), R14 + JE skipNext2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipNext2: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD 
$0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 + TESTQ $(1<<3), R14 + JE skipNext3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipNext3: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 + TESTQ $(1<<4), R14 + JE skipNext4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipNext4: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 + TESTQ $(1<<5), R14 + JE skipNext5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipNext5: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 + TESTQ $(1<<6), R14 + JE skipNext6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipNext6: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 + TESTQ $(1<<7), R14 + JE skipNext7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipNext7: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 + TESTQ $(1<<8), R14 + JE skipNext8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipNext8: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a + TESTQ $(1<<9), R14 + JE skipNext9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + 
+skipNext9: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b + TESTQ $(1<<10), R14 + JE skipNext10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipNext10: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c + TESTQ $(1<<11), R14 + JE skipNext11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipNext11: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d + TESTQ $(1<<12), R14 + JE skipNext12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipNext12: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e + TESTQ $(1<<13), R14 + JE skipNext13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipNext13: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f + TESTQ $(1<<14), R14 + JE skipNext14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipNext14: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 + TESTQ $(1<<15), R14 + JE skipNext15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipNext15: + QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD 
$0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 + JMP lloop + +lastLoop: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD 
$0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD 
$0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f + VZEROUPPER + RET + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go new file mode 100644 index 00000000..bef94941 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go @@ -0,0 +1,6 @@ +//+build !noasm,!appengine,gc + +package sha256 + +//go:noescape +func blockSha(h *[8]uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s new file mode 100644 index 00000000..909fc0ef --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s @@ -0,0 +1,266 @@ +//+build !noasm,!appengine + +// SHA intrinsic version of SHA256 + +// Kristofer Peterson, (C) 2018. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "textflag.h" + +DATA K<>+0x00(SB)/4, $0x428a2f98 +DATA K<>+0x04(SB)/4, $0x71374491 +DATA K<>+0x08(SB)/4, $0xb5c0fbcf +DATA K<>+0x0c(SB)/4, $0xe9b5dba5 +DATA K<>+0x10(SB)/4, $0x3956c25b +DATA K<>+0x14(SB)/4, $0x59f111f1 +DATA K<>+0x18(SB)/4, $0x923f82a4 +DATA K<>+0x1c(SB)/4, $0xab1c5ed5 +DATA K<>+0x20(SB)/4, $0xd807aa98 +DATA K<>+0x24(SB)/4, $0x12835b01 +DATA K<>+0x28(SB)/4, $0x243185be +DATA K<>+0x2c(SB)/4, $0x550c7dc3 +DATA K<>+0x30(SB)/4, $0x72be5d74 +DATA K<>+0x34(SB)/4, $0x80deb1fe +DATA K<>+0x38(SB)/4, $0x9bdc06a7 +DATA K<>+0x3c(SB)/4, $0xc19bf174 +DATA K<>+0x40(SB)/4, $0xe49b69c1 +DATA K<>+0x44(SB)/4, $0xefbe4786 +DATA K<>+0x48(SB)/4, $0x0fc19dc6 +DATA K<>+0x4c(SB)/4, $0x240ca1cc +DATA K<>+0x50(SB)/4, $0x2de92c6f +DATA K<>+0x54(SB)/4, $0x4a7484aa +DATA K<>+0x58(SB)/4, $0x5cb0a9dc +DATA K<>+0x5c(SB)/4, $0x76f988da +DATA K<>+0x60(SB)/4, $0x983e5152 +DATA K<>+0x64(SB)/4, $0xa831c66d +DATA K<>+0x68(SB)/4, $0xb00327c8 +DATA K<>+0x6c(SB)/4, $0xbf597fc7 +DATA K<>+0x70(SB)/4, $0xc6e00bf3 +DATA K<>+0x74(SB)/4, $0xd5a79147 +DATA K<>+0x78(SB)/4, $0x06ca6351 +DATA K<>+0x7c(SB)/4, $0x14292967 +DATA K<>+0x80(SB)/4, $0x27b70a85 +DATA K<>+0x84(SB)/4, $0x2e1b2138 +DATA K<>+0x88(SB)/4, $0x4d2c6dfc +DATA K<>+0x8c(SB)/4, $0x53380d13 +DATA K<>+0x90(SB)/4, $0x650a7354 +DATA K<>+0x94(SB)/4, $0x766a0abb +DATA K<>+0x98(SB)/4, $0x81c2c92e +DATA K<>+0x9c(SB)/4, $0x92722c85 +DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 +DATA K<>+0xa4(SB)/4, $0xa81a664b +DATA K<>+0xa8(SB)/4, $0xc24b8b70 +DATA K<>+0xac(SB)/4, $0xc76c51a3 +DATA K<>+0xb0(SB)/4, $0xd192e819 +DATA K<>+0xb4(SB)/4, $0xd6990624 +DATA K<>+0xb8(SB)/4, $0xf40e3585 +DATA K<>+0xbc(SB)/4, $0x106aa070 +DATA K<>+0xc0(SB)/4, $0x19a4c116 +DATA K<>+0xc4(SB)/4, $0x1e376c08 +DATA K<>+0xc8(SB)/4, $0x2748774c +DATA K<>+0xcc(SB)/4, $0x34b0bcb5 +DATA K<>+0xd0(SB)/4, $0x391c0cb3 +DATA K<>+0xd4(SB)/4, $0x4ed8aa4a +DATA K<>+0xd8(SB)/4, $0x5b9cca4f +DATA K<>+0xdc(SB)/4, $0x682e6ff3 +DATA K<>+0xe0(SB)/4, $0x748f82ee +DATA K<>+0xe4(SB)/4, $0x78a5636f +DATA K<>+0xe8(SB)/4, $0x84c87814 +DATA K<>+0xec(SB)/4, $0x8cc70208 +DATA K<>+0xf0(SB)/4, $0x90befffa +DATA K<>+0xf4(SB)/4, $0xa4506ceb +DATA K<>+0xf8(SB)/4, $0xbef9a3f7 +DATA K<>+0xfc(SB)/4, $0xc67178f2 +GLOBL K<>(SB), RODATA|NOPTR, $256 + +DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 +DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b +GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 + +// Register Usage +// BX base address of constant table (constant) +// DX hash_state (constant) +// SI hash_data.data +// DI hash_data.data + hash_data.length - 64 (constant) +// X0 scratch +// X1 scratch +// X2 working hash state // ABEF +// X3 working hash state // CDGH +// X4 first 16 bytes of block +// X5 second 16 bytes of block +// X6 third 16 bytes of block +// X7 fourth 16 bytes of block +// X12 saved hash state // ABEF +// X13 saved hash state // CDGH +// X15 data shuffle mask (constant) + +TEXT ·blockSha(SB), NOSPLIT, $0-32 + MOVQ h+0(FP), DX + MOVQ message_base+8(FP), SI + MOVQ message_len+16(FP), DI + LEAQ -64(SI)(DI*1), DI + MOVOU (DX), X2 + MOVOU 16(DX), X1 + MOVO X2, X3 + PUNPCKLLQ X1, X2 + PUNPCKHLQ X1, X3 + PSHUFD 
$0x27, X2, X2 + PSHUFD $0x27, X3, X3 + MOVO SHUF_MASK<>(SB), X15 + LEAQ K<>(SB), BX + + JMP TEST + +LOOP: + MOVO X2, X12 + MOVO X3, X13 + + // load block and shuffle + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOU 32(SI), X6 + MOVOU 48(SI), X7 + PSHUFB X15, X4 + PSHUFB X15, X5 + PSHUFB X15, X6 + PSHUFB X15, X7 + +#define ROUND456 \ + PADDL X5, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 \ + LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 \ + LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + +#define ROUND567 \ + PADDL X6, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 \ + LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 \ + LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + +#define ROUND674 \ + PADDL X7, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X7, X1 \ + LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 + PADDL X1, X4 \ + LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 + +#define ROUND745 \ + PADDL X4, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X4, X1 \ + LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 + PADDL X1, X5 \ + LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 + + // rounds 0-3 + MOVO (BX), X0 + PADDL X4, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 4-7 + MOVO 1*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + + // rounds 8-11 + MOVO 2*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + + MOVO 3*16(BX), X0; ROUND674 // rounds 12-15 + MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 + MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 + MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 + MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 + MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 + MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 + MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 + MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 + MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 + + // rounds 52-55 + MOVO 13*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 + LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 + LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 56-59 + MOVO 14*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 + LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 + LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 60-63 + MOVO 15*16(BX), X0 + PADDL X7, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + PADDL X12, X2 + PADDL X13, X3 + + ADDQ $64, SI + +TEST: + 
CMPQ SI, DI // any 64-byte blocks left? (DI points at the final block)
+	JBE  LOOP
+
+	PSHUFD $0x4e, X3, X0 // reassemble ABEF/CDGH working state into canonical H0..H7 order
+	LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0
+	PSHUFD $0x4e, X2, X1
+	LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f
+	PSHUFD $0x1b, X0, X0
+	PSHUFD $0x1b, X1, X1
+
+	MOVOU X0, (DX)
+	MOVOU X1, 16(DX)
+
+	RET
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
new file mode 100644
index 00000000..0c48d45f
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
@@ -0,0 +1,27 @@
+//+build !noasm,!appengine,gc
+
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha256
+
+func blockArmGo(dig *digest, p []byte) {
+	panic("blockArmGo called unexpectedly")
+}
+
+func blockShaGo(dig *digest, p []byte) {
+	blockSha(&dig.h, p)
+}
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
new file mode 100644
index 00000000..58ccf6eb
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
@@ -0,0 +1,36 @@
+//+build !noasm,!appengine,gc
+
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha256
+
+func blockShaGo(dig *digest, p []byte) {
+	panic("blockShaGo called unexpectedly")
+}
+
+//go:noescape
+func blockArm(h []uint32, message []uint8)
+
+func blockArmGo(dig *digest, p []byte) {
+	h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
+
+	blockArm(h[:], p[:])
+
+	dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
+		h[5], h[6], h[7]
+}
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
new file mode 100644
index 00000000..c6ddb371
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
@@ -0,0 +1,192 @@
+//+build !noasm,!appengine
+
+// ARM64 version of SHA256
+
+//
+// Minio Cloud Storage, (C) 2016 Minio, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +// Based on implementation as found in https://github.com/jocover/sha256-armv8 +// +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// their Plan9 equivalents +// + +TEXT ·blockArm(SB), 7, $0 + MOVD h+0(FP), R0 + MOVD message+24(FP), R1 + MOVD message_len+32(FP), R2 // length of message + SUBS $64, R2 + BMI complete + + // Load constants table pointer + MOVD $·constants(SB), R3 + + // Cache constants table in registers v16 - v31 + WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 + WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 + WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 + + WORD $0x4c407801 // ld1 {v1.4s}, [x0] + WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 + WORD $0xd1004000 // sub x0, x0, #0x10 + WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 + +loop: + // Main loop + WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 + WORD $0x4ea01c02 // mov v2.16b, v0.16b + WORD $0x4ea11c23 // mov v3.16b, v1.16b + WORD $0x6e2008a5 // rev32 v5.16b, v5.16b + WORD $0x6e2008c6 // rev32 v6.16b, v6.16b + WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s + WORD $0x6e2008e7 // rev32 v7.16b, v7.16b + WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x6e200908 // rev32 v8.16b, v8.16b + WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s + WORD 
$0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + + SUBS $64, R2 + BPL loop + + // Store result + WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] + +complete: + RET + +// Constants table +DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 +DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b +DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 +DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be +DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152 +DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 +DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 +DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc +DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 +DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e +DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 +DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 +DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c +DATA 
·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3
+DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f
+DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee
+DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814
+DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa
+DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7
+
+GLOBL ·constants(SB), 8, $256
+
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go
new file mode 100644
index 00000000..ec586c06
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go
@@ -0,0 +1,28 @@
+//+build appengine noasm !amd64,!arm64 !gc
+
+/*
+ * Minio Cloud Storage, (C) 2019 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha256
+
+func blockShaGo(dig *digest, p []byte) {
+	panic("blockShaGo called unexpectedly")
+}
+
+func blockArmGo(dig *digest, p []byte) {
+	panic("blockArmGo called unexpectedly")
+}
diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh
new file mode 100644
index 00000000..50150eaa
--- /dev/null
+++ b/vendor/github.com/minio/sha256-simd/test-architectures.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+go tool dist list | while IFS=/ read os arch; do
+	echo "Checking $os/$arch..."
+	echo "  normal"
+	GOARCH=$arch GOOS=$os go build -o /dev/null ./...
+	echo "  noasm"
+	GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./...
+	echo "  appengine"
+	GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./...
+	echo "  noasm,appengine"
+	GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./...
+done
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/github.com/mitchellh/go-homedir/LICENSE
similarity index 94%
rename from vendor/go.uber.org/atomic/LICENSE.txt
rename to vendor/github.com/mitchellh/go-homedir/LICENSE
index 8765c9fb..f9c841a5 100644
--- a/vendor/go.uber.org/atomic/LICENSE.txt
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -1,4 +1,6 @@
-Copyright (c) 2016 Uber Technologies, Inc.
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 00000000..d70706d5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross-compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
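For reference, the two calls documented in this README are consumed as in the following minimal sketch (the import alias and example path are illustrative only and are not part of this patch):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Detect the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand a leading "~" into that directory; paths without "~" pass through unchanged.
	conf, err := homedir.Expand("~/.ssh/config")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", conf)
}
```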
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 00000000..25378537
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,167 @@
+package homedir
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+	if !DisableCache {
+		cacheLock.RLock()
+		cached := homedirCache
+		cacheLock.RUnlock()
+		if cached != "" {
+			return cached, nil
+		}
+	}
+
+	cacheLock.Lock()
+	defer cacheLock.Unlock()
+
+	var result string
+	var err error
+	if runtime.GOOS == "windows" {
+		result, err = dirWindows()
+	} else {
+		// Unix-like system, so just assume Unix
+		result, err = dirUnix()
+	}
+
+	if err != nil {
+		return "", err
+	}
+	homedirCache = result
+	return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+	if len(path) == 0 {
+		return path, nil
+	}
+
+	if path[0] != '~' {
+		return path, nil
+	}
+
+	if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+		return "", errors.New("cannot expand user-specific home dir")
+	}
+
+	dir, err := Dir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(dir, path[1:]), nil
+}
+
+// Reset clears the cache, forcing the next call to Dir to re-detect
+// the home directory. This generally never has to be called, but can be
+// useful in tests if you're modifying the home directory via the HOME
+// env var or something.
+func Reset() {
+	cacheLock.Lock()
+	defer cacheLock.Unlock()
+	homedirCache = ""
+}
+
+func dirUnix() (string, error) {
+	homeEnv := "HOME"
+	if runtime.GOOS == "plan9" {
+		// On plan9, env vars are lowercase.
+		homeEnv = "home"
+	}
+
+	// First prefer the HOME environmental variable
+	if home := os.Getenv(homeEnv); home != "" {
+		return home, nil
+	}
+
+	var stdout bytes.Buffer
+
+	// If that fails, try OS specific commands
+	if runtime.GOOS == "darwin" {
+		cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+		cmd.Stdout = &stdout
+		if err := cmd.Run(); err == nil {
+			result := strings.TrimSpace(stdout.String())
+			if result != "" {
+				return result, nil
+			}
+		}
+	} else {
+		cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+		cmd.Stdout = &stdout
+		if err := cmd.Run(); err != nil {
+			// If the error is ErrNotFound, we ignore it. Otherwise, return it.
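+			// (exec.ErrNotFound here means the getent binary itself is missing;
+			// in that case we fall through to the `cd && pwd` shell fallback
+			// at the bottom of this function.)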
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore new file mode 100644 index 00000000..c57100a5 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -0,0 +1 @@ +coverage.txt diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml new file mode 100644 index 00000000..b950e429 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -0,0 +1,20 @@ +language: go + +matrix: + include: + - go: "1.13.x" + - go: "1.14.x" + - go: "tip" + env: + - LINT=true + - COVERAGE=true + +install: + - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi + - go get -u github.com/stretchr/testify/... + +script: + - make test + - go build ./... 
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi + - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md new file mode 100644 index 00000000..d3bfcf62 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -0,0 +1,63 @@ +Changes by Version +================== + + +1.2.0 (2020-07-01) +------------------- + +* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro +* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed +* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena +* Add Go module support (#215) -- Zaba505 +* Make SetTag helper types in ext public (#229) -- Blake Edwards +* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov +* Improve noop impementation (#223) -- chanxuehong +* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak +* Fix typo in comments (#222) -- meteorlxy +* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅 +* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad + + +1.1.0 (2019-03-23) +------------------- + +Notable changes: +- The library is now released under Apache 2.0 license +- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) +- 'golang.org/x/net/context' is replaced with 'context' from the standard library + +List of all changes: + +- Export StartSpanFromContextWithTracer (#214) +- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) +- Use Set() instead of Add() in HTTPHeadersCarrier (#191) +- Update license to Apache 2.0 (#181) +- Replace 'golang.org/x/net/context' with 'context' (#176) +- Port of Python opentracing/harness/api_check.py to Go (#146) +- Fix race condition in MockSpan.Context() (#170) +- Add PeerHostIPv4.SetString() (#155) +- Add a Noop log field type to log to allow for optional fields (#150) + + +1.0.2 (2017-04-26) +------------------- + +- Add more semantic tags (#139) + + +1.0.1 (2017-02-06) +------------------- + +- Correct spelling in comments +- Address race in nextMockID() (#123) +- log: avoid panic marshaling nil error (#131) +- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) +- Drop Go 1.5 that fails in Travis (#129) +- Add convenience methods Key() and Value() to log.Field +- Add convenience methods to log.Field (2 years, 6 months ago) + +1.0.0 (2016-09-26) +------------------- + +- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) + diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE new file mode 100644 index 00000000..f0027349 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 00000000..62abb63f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,20 @@ +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover -race ./... + +.PHONY: cover +cover: + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md new file mode 100644 index 00000000..6ef1d7c9 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -0,0 +1,171 @@ +[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) + +# OpenTracing API for Go + +This package is a Go platform API for OpenTracing. + +## Required Reading + +In order to understand the Go platform API, one must first be familiar with the +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. + +## API overview for those adding instrumentation + +Everyday consumers of this `opentracing` package really only need to worry +about a couple of key abstractions: the `StartSpan` function, the `Span` +interface, and binding a `Tracer` at `main()`-time. Here are code snippets +demonstrating some important use cases. + +#### Singleton initialization + +The simplest starting point is `./default_tracer.go`. As early as possible, call + +```go + import "github.com/opentracing/opentracing-go" + import ".../some_tracing_impl" + + func main() { + opentracing.SetGlobalTracer( + // tracing impl specific: + some_tracing_impl.New(...), + ) + ... + } +``` + +#### Non-Singleton initialization + +If you prefer direct control to singletons, manage ownership of the +`opentracing.Tracer` implementation explicitly. + +#### Creating a Span given an existing Go `context.Context` + +If you use `context.Context` in your application, OpenTracing's Go library will +happily rely on it for `Span` propagation. To start a new (blocking child) +`Span`, you can use `StartSpanFromContext`. + +```go + func xyz(ctx context.Context, ...) { + ... + span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") + defer span.Finish() + span.LogFields( + log.String("event", "soft error"), + log.String("type", "cache timeout"), + log.Int("waited.millis", 1500)) + ... + } +``` + +#### Starting an empty trace by creating a "root span" + +It's always possible to create a "root" `Span` with no parent or other causal +reference. + +```go + func xyz() { + ... + sp := opentracing.StartSpan("operation_name") + defer sp.Finish() + ... 
+ } +``` + +#### Creating a (child) Span given an existing (parent) Span + +```go + func xyz(parentSpan opentracing.Span, ...) { + ... + sp := opentracing.StartSpan( + "operation_name", + opentracing.ChildOf(parentSpan.Context())) + defer sp.Finish() + ... + } +``` + +#### Serializing to the wire + +```go + func makeSomeRequest(ctx context.Context) ... { + if span := opentracing.SpanFromContext(ctx); span != nil { + httpClient := &http.Client{} + httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) + + // Transmit the span's TraceContext as HTTP headers on our + // outbound request. + opentracing.GlobalTracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(httpReq.Header)) + + resp, err := httpClient.Do(httpReq) + ... + } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... + wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. + serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go new file mode 100644 index 00000000..e11977eb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext.go @@ -0,0 +1,24 @@ +package opentracing + +import ( + "context" +) + +// TracerContextWithSpanExtension is an extension interface that the +// implementation of the Tracer interface may want to implement. It +// allows to have some control over the go context when the +// ContextWithSpan is invoked. +// +// The primary purpose of this extension are adapters from opentracing +// API to some other tracing API. 
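+//
+// A sketch only (the bridgeTracer and bridgeKey names are illustrative, not
+// part of this package): an adapter could use the hook to mirror the span
+// under its own context key:
+//
+//	func (t *bridgeTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
+//		return context.WithValue(ctx, bridgeKey{}, span)
+//	}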
+type TracerContextWithSpanExtension interface { + // ContextWithSpanHook gets called by the ContextWithSpan + // function, when the Tracer implementation also implements + // this interface. It allows to put extra information into the + // context and make it available to the callers of the + // ContextWithSpan. + // + // This hook is invoked before the ContextWithSpan function + // actually puts the span into the context. + ContextWithSpanHook(ctx context.Context, span Span) context.Context +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go new file mode 100644 index 00000000..8282bd75 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go @@ -0,0 +1,17 @@ +package ext + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" +) + +// LogError sets the error=true tag on the Span and logs err as an "error" event. +func LogError(span opentracing.Span, err error, fields ...log.Field) { + Error.Set(span, true) + ef := []log.Field{ + log.Event("error"), + log.Error(err), + } + ef = append(ef, fields...) + span.LogFields(ef...) +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go new file mode 100644 index 00000000..a414b595 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -0,0 +1,215 @@ +package ext + +import "github.com/opentracing/opentracing-go" + +// These constants define common tag names recommended for better portability across +// tracing systems and languages/platforms. +// +// The tag names are defined as typed strings, so that in addition to the usual use +// +// span.setTag(TagName, value) +// +// they also support value type validation via this additional syntax: +// +// TagName.Set(span, value) +// +var ( + ////////////////////////////////////////////////////////////////////// + // SpanKind (client/server or producer/consumer) + ////////////////////////////////////////////////////////////////////// + + // SpanKind hints at relationship between spans, e.g. client/server + SpanKind = spanKindTagName("span.kind") + + // SpanKindRPCClient marks a span representing the client-side of an RPC + // or other remote call + SpanKindRPCClientEnum = SpanKindEnum("client") + SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} + + // SpanKindRPCServer marks a span representing the server-side of an RPC + // or other remote call + SpanKindRPCServerEnum = SpanKindEnum("server") + SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} + + // SpanKindProducer marks a span representing the producer-side of a + // message bus + SpanKindProducerEnum = SpanKindEnum("producer") + SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} + + // SpanKindConsumer marks a span representing the consumer-side of a + // message bus + SpanKindConsumerEnum = SpanKindEnum("consumer") + SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} + + ////////////////////////////////////////////////////////////////////// + // Component name + ////////////////////////////////////////////////////////////////////// + + // Component is a low-cardinality identifier of the module, library, + // or package that is generating a span. 
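+	// (For example, an HTTP instrumentation layer might call
+	// Component.Set(span, "net/http").)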
+ Component = StringTagName("component") + + ////////////////////////////////////////////////////////////////////// + // Sampling hint + ////////////////////////////////////////////////////////////////////// + + // SamplingPriority determines the priority of sampling this Span. + SamplingPriority = Uint16TagName("sampling.priority") + + ////////////////////////////////////////////////////////////////////// + // Peer tags. These tags can be emitted by either client-side or + // server-side to describe the other side/service in a peer-to-peer + // communications, like an RPC call. + ////////////////////////////////////////////////////////////////////// + + // PeerService records the service name of the peer. + PeerService = StringTagName("peer.service") + + // PeerAddress records the address name of the peer. This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = StringTagName("peer.address") + + // PeerHostname records the host name of the peer + PeerHostname = StringTagName("peer.hostname") + + // PeerHostIPv4 records IP v4 host address of the peer + PeerHostIPv4 = IPv4TagName("peer.ipv4") + + // PeerHostIPv6 records IP v6 host address of the peer + PeerHostIPv6 = StringTagName("peer.ipv6") + + // PeerPort records port number of the peer + PeerPort = Uint16TagName("peer.port") + + ////////////////////////////////////////////////////////////////////// + // HTTP Tags + ////////////////////////////////////////////////////////////////////// + + // HTTPUrl should be the URL of the request being handled in this segment + // of the trace, in standard URI format. The protocol is optional. + HTTPUrl = StringTagName("http.url") + + // HTTPMethod is the HTTP method of the request, and is case-insensitive. + HTTPMethod = StringTagName("http.method") + + // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the + // HTTP response. + HTTPStatusCode = Uint16TagName("http.status_code") + + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = StringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = StringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = StringTagName("db.type") + + // DBUser is a username for accessing database. + DBUser = StringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = StringTagName("message_bus.destination") + + ////////////////////////////////////////////////////////////////////// + // Error Tag + ////////////////////////////////////////////////////////////////////// + + // Error indicates that operation represented by the span resulted in an error. 
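+	// (Usually set together with an error log; the LogError helper in this
+	// package sets error=true and logs the error object in a single call.)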
+ Error = BoolTagName("error") +) + +// --- + +// SpanKindEnum represents common span types +type SpanKindEnum string + +type spanKindTagName string + +// Set adds a string tag to the `span` +func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { + span.SetTag(string(tag), value) +} + +type rpcServerOption struct { + clientContext opentracing.SpanContext +} + +func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { + if r.clientContext != nil { + opentracing.ChildOf(r.clientContext).Apply(o) + } + SpanKindRPCServer.Apply(o) +} + +// RPCServerOption returns a StartSpanOption appropriate for an RPC server span +// with `client` representing the metadata for the remote peer Span if available. +// In case client == nil, due to the client not being instrumented, this RPC +// server span will be a root span. +func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { + return rpcServerOption{client} +} + +// --- + +// StringTagName is a common tag name to be set to a string value +type StringTagName string + +// Set adds a string tag to the `span` +func (tag StringTagName) Set(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} + +// --- + +// Uint32TagName is a common tag name to be set to a uint32 value +type Uint32TagName string + +// Set adds a uint32 tag to the `span` +func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// --- + +// Uint16TagName is a common tag name to be set to a uint16 value +type Uint16TagName string + +// Set adds a uint16 tag to the `span` +func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { + span.SetTag(string(tag), value) +} + +// --- + +// BoolTagName is a common tag name to be set to a bool value +type BoolTagName string + +// Set adds a bool tag to the `span` +func (tag BoolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} + +// IPv4TagName is a common tag name to be set to an ipv4 value +type IPv4TagName string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag IPv4TagName) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 00000000..4f7066a9 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,42 @@ +package opentracing + +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + +var ( + globalTracer = registeredTracer{NoopTracer{}, false} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = registeredTracer{tracer, true} +} + +// GlobalTracer returns the global singleton `Tracer` implementation. 
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer.tracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.tracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 00000000..1831bc9b --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,65 @@ +package opentracing + +import "context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// the span. If span is nil, a new context without an active span is returned. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + if span != nil { + if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { + ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + } + } + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// It's behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. +func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + } + span := tracer.StartSpan(operationName, opts...) 
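+	// Wrap the new span back into the returned context so that callees can
+	// later retrieve it via SpanFromContext.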
+ return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 00000000..f222ded7 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,282 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType + noopType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error.object" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error.object", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +// Please pass in an immutable object, otherwise there may be concurrency issues. +// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". +// Because span is sent asynchronously, it is possible that this map will also be modified. 
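+//
+// A minimal sketch of the safe pattern, assuming the caller owns liveMap
+// (the snapshot helper is illustrative, not part of this package):
+//
+//	func snapshot(m map[string]string) map[string]string {
+//		c := make(map[string]string, len(m))
+//		for k, v := range m {
+//			c[k] = v
+//		}
+//		return c
+//	}
+//
+//	span.LogFields(log.Object("state", snapshot(liveMap)))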
+func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// Event creates a string-valued Field for span logs with key="event" and value=val. +func Event(val string) Field { + return String("event", val) +} + +// Message creates a string-valued Field for span logs with key="message" and value=val. +func Message(val string) Field { + return String("message", val) +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. +// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. 
+func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + case noopType: + return nil + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 00000000..d57e28aa --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,61 @@ +package log + +import ( + "fmt" + "reflect" +) + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { + fields[i] = String(key, "nil") + continue + } + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 00000000..f9b680a2 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. 
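+//
+// A library-side sketch of that default (Client and NewClient are
+// illustrative names, not part of this package):
+//
+//	type Client struct{ tracer opentracing.Tracer }
+//
+//	func NewClient() *Client {
+//		// Until a caller swaps in a real tracer, every span is a no-op.
+//		return &Client{tracer: opentracing.NoopTracer{}}
+//	}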
+// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. +type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext SpanContext = noopSpanContext{} + defaultNoopSpan Span = noopSpan{} + defaultNoopTracer Tracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return n } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 00000000..b0c275eb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. 
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. + ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. 
As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HTTPHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Set(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. +func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 00000000..0d3fb534 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,189 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. 
Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). + Context() SpanContext + + // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. 
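
Editor's note: a compact sketch exercising the Span methods documented above; NoopTracer again stands in for a real tracer, and the tag, log, and baggage values are arbitrary examples.

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	tracer := opentracing.NoopTracer{} // stand-in for a real tracer
	span := tracer.StartSpan("fetch-feed")

	span.SetTag("user.id", 42) // numeric, string, and bool values are portable

	// Type-checked structured logging, as in the LogFields example above.
	span.LogFields(
		log.String("event", "soft error"),
		log.String("type", "cache timeout"),
		log.Int("waited.millis", 1500),
	)

	// Terser but unchecked; arguments must alternate key, value.
	span.LogKV("event", "retry", "attempt", 2)

	// Baggage propagates to all *future* descendants of this span.
	span.SetBaggageItem("session", "abc123")
	_ = span.BaggageItem("session")

	span.Finish() // must be the last call on the span
}
```
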
+type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 00000000..715f0ced --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,304 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
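
Editor's note: tying FinishOptions, the deprecated LogData, and ToLogRecord together, a small sketch of finishing a span with explicit timestamps; the operation name and durations are made up for illustration.

```go
package main

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	tracer := opentracing.NoopTracer{} // stand-in for a real tracer
	start := time.Now().Add(-250 * time.Millisecond)
	span := tracer.StartSpan("replayed-op", opentracing.StartTime(start))

	// Upgrade a deprecated LogData value to the LogRecord form.
	rec := (&opentracing.LogData{
		Timestamp: start.Add(100 * time.Millisecond),
		Event:     "cache miss",
		Payload:   map[string]int{"keys": 3},
	}).ToLogRecord()

	// Hand the finish time and the bulk log records over in one call.
	// Every LogRecord timestamp must lie within [start, FinishTime].
	span.FinishWithOptions(opentracing.FinishOptions{
		FinishTime: start.Add(250 * time.Millisecond),
		LogRecords: []opentracing.LogRecord{rec},
	})
}
```
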
+ // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see https://godoc.org/context#WithValue). + // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). 
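
Editor's note: fleshing out the options-folding pattern quoted in the StartSpanOptions comment above; `collect` is a name invented for this sketch, not part of the package.

```go
package main

import (
	"fmt"
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

// collect folds StartSpanOption values into a StartSpanOptions struct,
// exactly as a Tracer implementation's StartSpan would.
func collect(opts ...opentracing.StartSpanOption) opentracing.StartSpanOptions {
	sso := opentracing.StartSpanOptions{}
	for _, o := range opts {
		o.Apply(&sso)
	}
	return sso
}

func main() {
	sso := collect(
		opentracing.StartTime(time.Now().Add(-time.Second)),
		opentracing.Tag{Key: "user_agent", Value: "cli/1.0"},
		opentracing.Tags{"component": "worker", "retry.max": 3},
	)
	// Tag and Tags merge into the same map; StartTime overrides the
	// implicit time.Now().
	fmt.Println(sso.StartTime, sso.Tags)
}
```
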
+ References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. + // + // If specified, the caller hands off ownership of Tags at + // StartSpan() invocation time. + Tags map[string]interface{} +} + +// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. +// +// StartSpanOption borrows from the "functional options" pattern, per +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type StartSpanOption interface { + Apply(*StartSpanOptions) +} + +// SpanReferenceType is an enum type describing different categories of +// relationships between two Spans. If Span-2 refers to Span-1, the +// SpanReferenceType describes Span-1 from Span-2's perspective. For example, +// ChildOfRef means that Span-1 created Span-2. +// +// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for +// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, +// or Span-2 may be sitting in a distributed queue behind Span-1. +type SpanReferenceType int + +const ( + // ChildOfRef refers to a parent Span that caused *and* somehow depends + // upon the new child Span. Often (but not always), the parent Span cannot + // finish until the child Span does. + // + // An timing diagram for a ChildOfRef that's blocked on the new Span: + // + // [-Parent Span---------] + // [-Child Span----] + // + // See http://opentracing.io/spec/ + // + // See opentracing.ChildOf() + ChildOfRef SpanReferenceType = iota + + // FollowsFromRef refers to a parent Span that does not depend in any way + // on the result of the new child Span. For instance, one might use + // FollowsFromRefs to describe pipeline stages separated by queues, + // or a fire-and-forget cache insert at the tail end of a web request. + // + // A FollowsFromRef Span is part of the same logical trace as the new Span: + // i.e., the new Span is somehow caused by the work of its FollowsFromRef. + // + // All of the following could be valid timing diagrams for children that + // "FollowFrom" a parent. + // + // [-Parent Span-] [-Child Span-] + // + // + // [-Parent Span--] + // [-Child Span-] + // + // + // [-Parent Span-] + // [-Child Span-] + // + // See http://opentracing.io/spec/ + // + // See opentracing.FollowsFrom() + FollowsFromRef +) + +// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a +// referenced SpanContext. See the SpanReferenceType documentation for +// supported relationships. If SpanReference is created with +// ReferencedContext==nil, it has no effect. Thus it allows for a more concise +// syntax for starting spans: +// +// sc, _ := tracer.Extract(someFormat, someCarrier) +// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) +// +// The `ChildOf(sc)` option above will not panic if sc == nil, it will just +// not add the parent span reference to the options. +type SpanReference struct { + Type SpanReferenceType + ReferencedContext SpanContext +} + +// Apply satisfies the StartSpanOption interface. +func (r SpanReference) Apply(o *StartSpanOptions) { + if r.ReferencedContext != nil { + o.References = append(o.References, r) + } +} + +// ChildOf returns a StartSpanOption pointing to a dependent parent span. +// If sc == nil, the option has no effect. 
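
Editor's note: to make the reference types concrete, a sketch of the usual server-side pattern, relying on the nil-safe ChildOf behavior just described; the operation names are invented.

```go
package main

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

func startServerSpan(tracer opentracing.Tracer, r *http.Request) opentracing.Span {
	// On uninstrumented requests Extract fails and parent is nil;
	// ChildOf(nil) is documented above to be a harmless no-op, so no
	// branching is needed.
	parent, _ := tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(r.Header),
	)
	return tracer.StartSpan("handle-request", opentracing.ChildOf(parent))
}

func main() {
	tracer := opentracing.NoopTracer{}
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	span := startServerSpan(tracer, req)
	defer span.Finish()

	// A fire-and-forget job caused by this request, per FollowsFromRef.
	job := tracer.StartSpan("cache-warm", opentracing.FollowsFrom(span.Context()))
	job.Finish()
}
```
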
+// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. +// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md new file mode 100644 index 00000000..1ac6a81f --- /dev/null +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2014-2015, Philip Hofer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md new file mode 100644 index 00000000..38349af3 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/README.md @@ -0,0 +1,315 @@ + +# fwd + import "github.com/philhofer/fwd" + +The `fwd` package provides a buffered reader +and writer. 
Each has methods that help improve +the encoding/decoding performance of some binary +protocols. + +The `fwd.Writer` and `fwd.Reader` type provide similar +functionality to their counterparts in `bufio`, plus +a few extra utility methods that simplify read-ahead +and write-ahead. I wrote this package to improve serialization +performance for http://github.com/tinylib/msgp, +where it provided about a 2x speedup over `bufio` for certain +workloads. However, care must be taken to understand the semantics of the +extra methods provided by this package, as they allow +the user to access and manipulate the buffer memory +directly. + +The extra methods for `fwd.Reader` are `Peek`, `Skip` +and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +will re-allocate the read buffer in order to accommodate arbitrarily +large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +in the stream, and uses the `io.Seeker` interface if the underlying +stream implements it. `(*fwd.Reader).Next` returns a slice pointing +to the next `n` bytes in the read buffer (like `Peek`), but also +increments the read position. This allows users to process streams +in arbitrary block sizes without having to manage appropriately-sized +slices. Additionally, obviating the need to copy the data from the +buffer to another location in memory can improve performance dramatically +in CPU-bound applications. + +`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +returns a slice pointing to the next `n` bytes of the writer, and increments +the write position by the length of the returned slice. This allows users +to write directly to the end of the buffer. + + + + +## Constants +``` go +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 +) +``` +``` go +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 +) +``` + + + +## type Reader +``` go +type Reader struct { + // contains filtered or unexported fields +} +``` +Reader is a buffered look-ahead reader + + + + + + + + + +### func NewReader +``` go +func NewReader(r io.Reader) *Reader +``` +NewReader returns a new *Reader that reads from 'r' + + +### func NewReaderSize +``` go +func NewReaderSize(r io.Reader, n int) *Reader +``` +NewReaderSize returns a new *Reader that +reads from 'r' and has a buffer size 'n' + + + + +### func (\*Reader) BufferSize +``` go +func (r *Reader) BufferSize() int +``` +BufferSize returns the total size of the buffer + + + +### func (\*Reader) Buffered +``` go +func (r *Reader) Buffered() int +``` +Buffered returns the number of bytes currently in the buffer + + + +### func (\*Reader) Next +``` go +func (r *Reader) Next(n int) ([]byte, error) +``` +Next returns the next 'n' bytes in the stream. +Unlike Peek, Next advances the reader position. +The returned bytes point to the same +data as the buffer, so the slice is +only valid until the next reader method call. +An EOF is considered an unexpected error. +If an the returned slice is less than the +length asked for, an error will be returned, +and the reader position will not be incremented. + + + +### func (\*Reader) Peek +``` go +func (r *Reader) Peek(n int) ([]byte, error) +``` +Peek returns the next 'n' buffered bytes, +reading from the underlying reader if necessary. +It will only return a slice shorter than 'n' bytes +if it also returns an error. Peek does not advance +the reader. EOF errors are *not* returned as +io.ErrUnexpectedEOF. 
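
Editor's note: to illustrate the read-ahead methods above, a sketch that parses one length-prefixed frame with `Peek`, `Skip`, and `Next`; the framing itself is invented for the example.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	// A toy framed stream: 4-byte big-endian length, then payload.
	var raw bytes.Buffer
	payload := []byte("hello, fwd")
	_ = binary.Write(&raw, binary.BigEndian, uint32(len(payload)))
	raw.Write(payload)

	r := fwd.NewReader(&raw)

	// Peek at the header without consuming it...
	hdr, err := r.Peek(4)
	if err != nil {
		panic(err)
	}
	n := binary.BigEndian.Uint32(hdr)

	// ...then consume the header and the body. The slice returned by
	// Next aliases the internal buffer, so use it before the next call.
	if _, err := r.Skip(4); err != nil {
		panic(err)
	}
	body, err := r.Next(int(n))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body)
}
```
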
+ + + +### func (\*Reader) Read +``` go +func (r *Reader) Read(b []byte) (int, error) +``` +Read implements `io.Reader` + + + +### func (\*Reader) ReadByte +``` go +func (r *Reader) ReadByte() (byte, error) +``` +ReadByte implements `io.ByteReader` + + + +### func (\*Reader) ReadFull +``` go +func (r *Reader) ReadFull(b []byte) (int, error) +``` +ReadFull attempts to read len(b) bytes into +'b'. It returns the number of bytes read into +'b', and an error if it does not return len(b). +EOF is considered an unexpected error. + + + +### func (\*Reader) Reset +``` go +func (r *Reader) Reset(rd io.Reader) +``` +Reset resets the underlying reader +and the read buffer. + + + +### func (\*Reader) Skip +``` go +func (r *Reader) Skip(n int) (int, error) +``` +Skip moves the reader forward 'n' bytes. +Returns the number of bytes skipped and any +errors encountered. It is analogous to Seek(n, 1). +If the underlying reader implements io.Seeker, then +that method will be used to skip forward. + +If the reader encounters +an EOF before skipping 'n' bytes, it +returns io.ErrUnexpectedEOF. If the +underlying reader implements io.Seeker, then +those rules apply instead. (Many implementations +will not return `io.EOF` until the next call +to Read.) + + + +### func (\*Reader) WriteTo +``` go +func (r *Reader) WriteTo(w io.Writer) (int64, error) +``` +WriteTo implements `io.WriterTo` + + + +## type Writer +``` go +type Writer struct { + // contains filtered or unexported fields +} +``` +Writer is a buffered writer + + + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a new writer +that writes to 'w' and has a buffer +that is `DefaultWriterSize` bytes. + + +### func NewWriterSize +``` go +func NewWriterSize(w io.Writer, size int) *Writer +``` +NewWriterSize returns a new writer +that writes to 'w' and has a buffer +that is 'size' bytes. + + + + +### func (\*Writer) BufferSize +``` go +func (w *Writer) BufferSize() int +``` +BufferSize returns the maximum size of the buffer. + + + +### func (\*Writer) Buffered +``` go +func (w *Writer) Buffered() int +``` +Buffered returns the number of buffered bytes +in the reader. + + + +### func (\*Writer) Flush +``` go +func (w *Writer) Flush() error +``` +Flush flushes any buffered bytes +to the underlying writer. + + + +### func (\*Writer) Next +``` go +func (w *Writer) Next(n int) ([]byte, error) +``` +Next returns the next 'n' free bytes +in the write buffer, flushing the writer +as necessary. Next will return `io.ErrShortBuffer` +if 'n' is greater than the size of the write buffer. +Calls to 'next' increment the write position by +the size of the returned buffer. + + + +### func (\*Writer) ReadFrom +``` go +func (w *Writer) ReadFrom(r io.Reader) (int64, error) +``` +ReadFrom implements `io.ReaderFrom` + + + +### func (\*Writer) Write +``` go +func (w *Writer) Write(p []byte) (int, error) +``` +Write implements `io.Writer` + + + +### func (\*Writer) WriteByte +``` go +func (w *Writer) WriteByte(b byte) error +``` +WriteByte implements `io.ByteWriter` + + + +### func (\*Writer) WriteString +``` go +func (w *Writer) WriteString(s string) (int, error) +``` +WriteString is analogous to Write, but it takes a string. 
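
Editor's note: and the write-side counterpart, reserving the header bytes in place with `Next` rather than encoding into a scratch slice and copying; same invented framing as the reader sketch above.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	var out bytes.Buffer
	w := fwd.NewWriter(&out)

	payload := []byte("hello, fwd")

	// Reserve 4 bytes inside the write buffer and fill them in place.
	hdr, err := w.Next(4)
	if err != nil {
		panic(err)
	}
	binary.BigEndian.PutUint32(hdr, uint32(len(payload)))

	if _, err := w.Write(payload); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil { // push buffered bytes to `out`
		panic(err)
	}
	fmt.Printf("% x\n", out.Bytes())
}
```
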
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 00000000..72cc112b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,383 @@ +// The `fwd` package provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The `fwd.Writer` and `fwd.Reader` type provide similar +// functionality to their counterparts in `bufio`, plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for `fwd.Reader` are `Peek`, `Skip` +// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +// in the stream, and uses the `io.Seeker` interface if the underlying +// stream implements it. `(*fwd.Reader).Next` returns a slice pointing +// to the next `n` bytes in the read buffer (like `Peek`), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +// returns a slice pointing to the next `n` bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. +// +package fwd + +import ( + "io" + "os" +) + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n'. +func NewReaderSize(r io.Reader, n int) *Reader { + buf := make([]byte, 0, max(n, minReaderSize)) + return NewReaderBuf(r, buf) +} + +// NewReaderBuf returns a new *Reader that +// reads from 'r' and uses 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 16, +// custom buffer is allocated instead. 
+func NewReaderBuf(r io.Reader, buf []byte) *Reader { + if cap(buf) < minReaderSize { + buf = make([]byte, 0, minReaderSize) + } + buf = buf[:0] + rd := &Reader{ + r: r, + data: buf, + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } else if a > 0 && r.state == io.EOF { + // discard the io.EOF if we read more than 0 bytes. + // the next call to Read should return io.EOF again. + r.state = nil + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +// discard(n) discards up to 'n' buffered bytes, and +// and returns the number of bytes discarded +func (r *Reader) discard(n int) int { + inbuf := r.buffered() + if inbuf <= n { + r.n = 0 + r.data = r.data[:0] + return inbuf + } + r.n += n + return n +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). +// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. 
+// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns io.ErrUnexpectedEOF. If the +// underlying reader implements io.Seeker, then +// those rules apply instead. (Many implementations +// will not return `io.EOF` until the next call +// to Read.) +func (r *Reader) Skip(n int) (int, error) { + if n < 0 { + return 0, os.ErrInvalid + } + + // discard some or all of the current buffer + skipped := r.discard(n) + + // if we can Seek() through the remaining bytes, do that + if n > skipped && r.rs != nil { + nn, err := r.rs.Seek(int64(n-skipped), 1) + return int(nn) + skipped, err + } + // otherwise, keep filling the buffer + // and discarding it up to 'n' + for skipped < n && r.state == nil { + r.more() + skipped += r.discard(n - skipped) + } + return skipped, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. +func (r *Reader) Next(n int) ([]byte, error) { + + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + return out, nil +} + +// Read implements `io.Reader` +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. 
+ for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements `io.ByteReader` +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + return b, nil +} + +// WriteTo implements `io.WriterTo` +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 00000000..4d6ea15b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,236 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer that +// writes to 'w' and has a buffer size 'n'. +func NewWriterSize(w io.Writer, n int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= n { + return wr + } + buf := make([]byte, 0, max(n, minWriterSize)) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a new writer +// that writes to 'w' and has 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, 0, minWriterSize) + } + buf = buf[:0] + return &Writer{ + w: w, + buf: buf, + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. 
+ if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. +func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. 
+ w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 00000000..e367f393 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,5 @@ +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 00000000..a0bf453b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,18 @@ +// +build !appengine + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + l := len(b) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: l, + Cap: l, + Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, + })) +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 00000000..c73bb33b --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . + +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 00000000..b37da159 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/github.com/rs/xid/LICENSE similarity index 83% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/github.com/rs/xid/LICENSE index 6652bed4..47c5e9d2 100644 --- a/vendor/go.uber.org/zap/LICENSE.txt +++ b/vendor/github.com/rs/xid/LICENSE @@ -1,14 +1,14 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2015 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 00000000..5bf462e8 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,116 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base32 hex) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12-byte Object IDs. +The string representation uses base32 hex (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non-alphanumeric chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20-character, +all-lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). + +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake +ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central +generator servers. xid stands in between with 12 bytes (96 bits) and a more compact +URL-safe string representation (20 chars). No configuration or central generator server +is required, so it can be used directly in server code.
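
Editor's note: a quick sketch of the validation rule and string round trip described above; `validXID` is a name local to this example, not part of the package.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/rs/xid"
)

// validXID mirrors the "[0-9a-v]{20}" rule quoted above.
var validXID = regexp.MustCompile(`^[0-9a-v]{20}$`)

func main() {
	id := xid.New()
	s := id.String()

	fmt.Println(s, validXID.MatchString(s)) // 20 chars, always matches

	// Round trip through the string form.
	back, err := xid.FromString(s)
	if err != nil || back != id {
		panic("round trip failed")
	}
}
```
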
+ +| Name | Binary Size | String Size | Features +|-------------|-------------|----------------|---------------- +| [UUID] | 16 bytes | 36 chars | configuration free, not sortable +| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable +| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable +| [MongoID] | 12 bytes | 24 chars | configuration free, sortable +| xid | 12 bytes | 20 chars | configuration free, sortable + +[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier +[shortuuid]: https://github.com/stochastic-technologies/shortuuid +[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake +[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ + +Features: + +- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) +- Non configured, you don't need set a unique machine and/or data center id +- K-ordered +- Embedded time with 1 second precision +- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +- Lock-free (i.e.: unlike UUIDv1 and v2) + +Best used with [zerolog](https://github.com/rs/zerolog)'s +[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). + +Notes: + +- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid +- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). 
+ +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. + +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go new file mode 100644 index 00000000..ea253749 --- /dev/null +++ b/vendor/github.com/rs/xid/error.go @@ -0,0 +1,11 @@ +package xid + +const ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID. + ErrInvalidID strErr = "xid: invalid ID" +) + +// strErr allows declaring errors as constants. +type strErr string + +func (err strErr) Error() string { return string(err) } diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 00000000..08351ff7 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,9 @@ +// +build darwin + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.uuid") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 00000000..7fbd3c00 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 00000000..be25a039 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 00000000..837b2043 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + +import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 00000000..ec2593ee --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer syscall.RegCloseKey(h) + + const 
syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 00000000..1f536b41 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,392 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). +// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" + "unsafe" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. 
+ encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // objectIDCounter is atomically incremented when generating a new ObjectId + // using NewObjectId() function. It's used as a counter part of an id. + // This id is initialized with a random value. + objectIDCounter = randInt() + + // machineId stores machine id generated once and used in subsequent calls + // to NewObjectId function. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. +func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := md5.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine, first 3 bytes of md5(hostname) + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return *(*string)(unsafe.Pointer(&text)) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
+func (id ID) Encode(dst []byte) []byte {
+	encode(dst, id[:])
+	return dst
+}
+
+// MarshalText implements the encoding.TextMarshaler interface
+func (id ID) MarshalText() ([]byte, error) {
+	text := make([]byte, encodedLen)
+	encode(text, id[:])
+	return text, nil
+}
+
+// MarshalJSON implements the encoding/json Marshaler interface
+func (id ID) MarshalJSON() ([]byte, error) {
+	if id.IsNil() {
+		return []byte("null"), nil
+	}
+	text := make([]byte, encodedLen+2)
+	encode(text[1:encodedLen+1], id[:])
+	text[0], text[encodedLen+1] = '"', '"'
+	return text, nil
+}
+
+// encode by unrolling the stdlib base32 algorithm + removing all safe checks
+func encode(dst, id []byte) {
+	_ = dst[19]
+	_ = id[11]
+
+	dst[19] = encoding[(id[11]<<4)&0x1F]
+	dst[18] = encoding[(id[11]>>1)&0x1F]
+	dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
+	dst[16] = encoding[id[10]>>3]
+	dst[15] = encoding[id[9]&0x1F]
+	dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
+	dst[13] = encoding[(id[8]>>2)&0x1F]
+	dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
+	dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
+	dst[10] = encoding[(id[6]>>1)&0x1F]
+	dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
+	dst[8] = encoding[id[5]>>3]
+	dst[7] = encoding[id[4]&0x1F]
+	dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
+	dst[5] = encoding[(id[3]>>2)&0x1F]
+	dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
+	dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
+	dst[2] = encoding[(id[1]>>1)&0x1F]
+	dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
+	dst[0] = encoding[id[0]>>3]
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface
+func (id *ID) UnmarshalText(text []byte) error {
+	if len(text) != encodedLen {
+		return ErrInvalidID
+	}
+	for _, c := range text {
+		if dec[c] == 0xFF {
+			return ErrInvalidID
+		}
+	}
+	if !decode(id, text) {
+		return ErrInvalidID
+	}
+	return nil
+}
+
+// UnmarshalJSON implements the encoding/json Unmarshaler interface
+func (id *ID) UnmarshalJSON(b []byte) error {
+	s := string(b)
+	if s == "null" {
+		*id = nilID
+		return nil
+	}
+	// Check the slice length to prevent panic on passing it to UnmarshalText()
+	if len(b) < 2 {
+		return ErrInvalidID
+	}
+	return id.UnmarshalText(b[1 : len(b)-1])
+}
+
+// decode by unrolling the stdlib base32 algorithm + customized safe check.
+func decode(id *ID, src []byte) bool {
+	_ = src[19]
+	_ = id[11]
+
+	id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
+	id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
+	id[9] = dec[src[14]]<<5 | dec[src[15]]
+	id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
+	id[7] = dec[src[11]]<<4 | dec[src[12]]>>1
+	id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4
+	id[5] = dec[src[8]]<<3 | dec[src[9]]>>2
+	id[4] = dec[src[6]]<<5 | dec[src[7]]
+	id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3
+	id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
+	id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
+	id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
+
+	// Validate that there are no discarded bits (padding) in src that would
+	// cause the string-encoded id not to equal src.
+	var check [4]byte
+
+	check[3] = encoding[(id[11]<<4)&0x1F]
+	check[2] = encoding[(id[11]>>1)&0x1F]
+	check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
+	check[0] = encoding[id[10]>>3]
+	return bytes.Equal([]byte(src[16:20]), check[:])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id[0:4]))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Machine() []byte {
+	return id[4:7]
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Pid() uint16 {
+	return binary.BigEndian.Uint16(id[7:9])
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Counter() int32 {
+	b := id[9:12]
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
+
+// Value implements the driver.Valuer interface.
+func (id ID) Value() (driver.Value, error) {
+	if id.IsNil() {
+		return nil, nil
+	}
+	b, err := id.MarshalText()
+	return string(b), err
+}
+
+// Scan implements the sql.Scanner interface.
+func (id *ID) Scan(value interface{}) (err error) {
+	switch val := value.(type) {
+	case string:
+		return id.UnmarshalText([]byte(val))
+	case []byte:
+		return id.UnmarshalText(val)
+	case nil:
+		*id = nilID
+		return nil
+	default:
+		return fmt.Errorf("xid: scanning unsupported type: %T", value)
+	}
+}
+
+// IsNil returns true if this is a "nil" ID
+func (id ID) IsNil() bool {
+	return id == nilID
+}
+
+// NilID returns a zero value for `xid.ID`.
+func NilID() ID {
+	return nilID
+}
+
+// Bytes returns the byte array representation of `ID`
+func (id ID) Bytes() []byte {
+	return id[:]
+}
+
+// FromBytes converts the byte array representation of `ID` back to `ID`
+func FromBytes(b []byte) (ID, error) {
+	var id ID
+	if len(b) != rawLen {
+		return id, ErrInvalidID
+	}
+	copy(id[:], b)
+	return id, nil
+}
+
+// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`.
+// The result will be 0 if two IDs are identical, -1 if the current id is less than the other one,
+// and 1 if the current id is greater than the other.
+func (id ID) Compare(other ID) int {
+	return bytes.Compare(id[:], other[:])
+}
+
+type sorter []ID
+
+func (s sorter) Len() int {
+	return len(s)
+}
+
+func (s sorter) Less(i, j int) bool {
+	return s[i].Compare(s[j]) < 0
+}
+
+func (s sorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Sort sorts an array of IDs in place.
+// It works by wrapping `[]ID` and using `sort.Sort`.
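
Because ids are K-ordered, sorting their byte or string form sorts by creation time; a short sketch using the accessors above (again an editor's illustration, assuming the vendored import path):

    package main

    import (
        "fmt"

        "github.com/rs/xid"
    )

    func main() {
        ids := []xid.ID{xid.New(), xid.New(), xid.New()}
        xid.Sort(ids) // lexicographic order equals generation order
        for _, id := range ids {
            // Time, Pid and Counter decode three of the four sections of the 12-byte id.
            fmt.Println(id, id.Time().Unix(), id.Pid(), id.Counter())
        }
    }
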
+func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b1..2924cf3a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) 
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) } } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE new file mode 100644 index 00000000..14d60424 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 00000000..6c6bb37a --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,24 @@ +// +build linux,!appengine + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 00000000..da65ea54 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,17 @@ +// +build !linux appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 00000000..a0434c7e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,39 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + c.tb.StopTimer() + var n int + l := len(p) + m := len(c.data) + for n < l { + nn := copy(p[n:], c.data[c.offset:]) + n += nn + c.offset += nn + c.offset %= m + } + c.tb.StartTimer() + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 00000000..c634eef1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,142 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. +// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. +// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. 
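
A round-trip sketch of the first two families listed above — illustrative only; AppendString/AppendInt64 and the matching Read*Bytes helpers are assumed from the package's documented API surface and are not shown in this hunk:

    package main

    import (
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        // AppendXxxx family: build the MessagePack map {"count": 42} by hand.
        b := msgp.AppendMapHeader(nil, 1)
        b = msgp.AppendString(b, "count")
        b = msgp.AppendInt64(b, 42)

        // ReadXxxxBytes family: each call consumes one object and returns the rest.
        sz, rest, _ := msgp.ReadMapHeaderBytes(b)
        key, rest, _ := msgp.ReadStringBytes(rest)
        val, rest, _ := msgp.ReadInt64Bytes(rest)
        fmt.Println(sz, key, val, len(rest)) // 1 count 42 0
    }
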
+// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// msgp.Encode(io.Writer, msgp.Encodable) +// and +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const last4 = 0x0f +const first4 = 0xf0 +const last5 = 0x1f +const first3 = 0xe0 +const last7 = 0x7f + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 00000000..b473a6f6 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,242 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. +func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. 
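
The raw-editing helpers defined below can be exercised like this (a sketch; the Append* builders are assumed from the same package and are not part of this hunk):

    package main

    import (
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        // Build {"a": 1, "b": 2} without going through a struct.
        raw := msgp.AppendMapHeader(nil, 2)
        raw = msgp.AppendString(raw, "a")
        raw = msgp.AppendInt(raw, 1)
        raw = msgp.AppendString(raw, "b")
        raw = msgp.AppendInt(raw, 2)

        fmt.Println(msgp.HasKey("b", raw)) // true

        // Replace rewrites b's value, reusing raw's memory where capacity allows.
        raw = msgp.Replace("b", raw, msgp.AppendInt(nil, 99))
        v, _, _ := msgp.ReadIntBytes(msgp.Locate("b", raw))
        fmt.Println(v) // 99
    }
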
+func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func 
resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 00000000..95762e7e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,99 @@ +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. +// +var sizes = [256]bytespec{ + mnil: {size: 1, extra: constsize, typ: NilType}, + mfalse: {size: 1, extra: constsize, typ: BoolType}, + mtrue: {size: 1, extra: constsize, typ: BoolType}, + mbin8: {size: 2, extra: extra8, typ: BinType}, + mbin16: {size: 3, extra: extra16, typ: BinType}, + mbin32: {size: 5, extra: extra32, typ: BinType}, + mext8: {size: 3, extra: extra8, typ: ExtensionType}, + mext16: {size: 4, extra: extra16, typ: ExtensionType}, + mext32: {size: 6, extra: extra32, typ: ExtensionType}, + mfloat32: {size: 5, extra: constsize, typ: Float32Type}, + mfloat64: {size: 9, extra: constsize, typ: Float64Type}, + muint8: {size: 2, extra: constsize, typ: UintType}, + muint16: {size: 3, extra: constsize, typ: UintType}, + muint32: {size: 5, extra: constsize, typ: UintType}, + muint64: {size: 9, extra: constsize, typ: UintType}, + mint8: {size: 2, extra: constsize, typ: IntType}, + mint16: {size: 3, extra: constsize, typ: IntType}, + mint32: {size: 5, extra: constsize, typ: IntType}, + mint64: {size: 9, extra: constsize, typ: IntType}, + mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, + mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, + mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, + mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, + mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, + mstr8: {size: 2, extra: extra8, typ: StrType}, + mstr16: {size: 3, extra: extra16, typ: StrType}, + mstr32: {size: 5, extra: extra32, typ: StrType}, + marray16: {size: 3, extra: array16v, typ: ArrayType}, + marray32: {size: 5, extra: array32v, typ: ArrayType}, + mmap16: {size: 3, extra: map16v, typ: MapType}, + mmap32: {size: 5, extra: map32v, typ: MapType}, +} + +func init() { + // set up fixed fields + + // fixint + for i := mfixint; i < 0x80; i++ { + sizes[i] = bytespec{size: 
1, extra: constsize, typ: IntType} + } + + // nfixint + for i := uint16(mnfixint); i < 0x100; i++ { + sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // fixstr gets constsize, + // since the prefix yields the size + for i := mfixstr; i < 0xc0; i++ { + sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} + } + + // fixmap + for i := mfixmap; i < 0x90; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} + } + + // fixarray + for i := mfixarray; i < 0xa0; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} + } +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 = -1 // has uint8(p[1]) extra bytes + extra16 = -2 // has be16(p[1:]) extra bytes + extra32 = -3 // has be32(p[1:]) extra bytes + map16v = -4 // use map16 + map32v = -5 // use map32 + array16v = -6 // use array16 + array32v = -7 // use array32 +) + +func getType(v byte) Type { + return sizes[v].typ +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 00000000..921e8553 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,317 @@ +package msgp + +import ( + "fmt" + "reflect" +) + +const resumableDefault = false + +var ( + // ErrShortBytes is returned when the + // slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +// contextError allows msgp Error instances to be enhanced with additional +// context about their origin. +type contextError interface { + Error + + // withContext must not modify the error instance - it must clone and + // return a new error with the context added. + withContext(ctx string) error +} + +// Cause returns the underlying cause of an error that has been wrapped +// with additional context. +func Cause(e error) error { + out := e + if e, ok := e.(errWrapped); ok && e.cause != nil { + out = e.cause + } + return out +} + +// Resumable returns whether or not the error means that the stream of data is +// malformed and the information is unrecoverable. +func Resumable(e error) bool { + if e, ok := e.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// WrapError wraps an error with additional context that allows the part of the +// serialized type that caused the problem to be identified. Underlying errors +// can be retrieved using Cause() +// +// The input error is not modified - a new error should be returned. +// +// ErrShortBytes is not wrapped with any context due to backward compatibility +// issues with the public API. 
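
A short sketch of how WrapError and Cause (both defined in this file) compose; the base error is arbitrary:

    package main

    import (
        "errors"
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        base := errors.New("bad field")
        // Context segments are joined with "/" to locate the failure within the type.
        wrapped := msgp.WrapError(base, "User", "Settings", 3)
        fmt.Println(wrapped)                     // bad field at User/Settings/3
        fmt.Println(msgp.Cause(wrapped) == base) // true: Cause unwraps the original error
    }
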
+// +func WrapError(err error, ctx ...interface{}) error { + switch e := err.(type) { + case errShort: + return e + case contextError: + return e.withContext(ctxString(ctx)) + default: + return errWrapped{cause: err, ctx: ctxString(ctx)} + } +} + +// ctxString converts the incoming interface{} slice into a single string. +func ctxString(ctx []interface{}) string { + out := "" + for idx, cv := range ctx { + if idx > 0 { + out += "/" + } + out += fmt.Sprintf("%v", cv) + } + return out +} + +func addCtx(ctx, add string) string { + if ctx != "" { + return add + "/" + ctx + } else { + return add + } +} + +// errWrapped allows arbitrary errors passed to WrapError to be enhanced with +// context and unwrapped with Cause() +type errWrapped struct { + cause error + ctx string +} + +func (e errWrapped) Error() string { + if e.ctx != "" { + return fmt.Sprintf("%s at %s", e.cause, e.ctx) + } else { + return e.cause.Error() + } +} + +func (e errWrapped) Resumable() bool { + if e, ok := e.cause.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// Unwrap returns the cause. +func (e errWrapped) Unwrap() error { return e.cause } + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct { + ctx string +} + +func (f errFatal) Error() string { + out := "msgp: fatal decoding error (unreachable code)" + if f.ctx != "" { + out += " at " + f.ctx + } + return out +} + +func (f errFatal) Resumable() bool { return false } + +func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 + ctx string +} + +// Error implements the error interface +func (a ArrayError) Error() string { + out := fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) + if a.ctx != "" { + out += " at " + a.ctx + } + return out +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. 
+type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into + ctx string +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) + if i.ctx != "" { + str += " at " + i.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value + ctx string +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } + +// UintBelowZero is returned when a call +// would cast a signed integer below zero +// to an unsigned integer. +type UintBelowZero struct { + Value int64 // value of the incoming int + ctx string +} + +// Error implements the error interface +func (u UintBelowZero) Error() string { + str := fmt.Sprintf("msgp: attempted to cast int %d to unsigned", u.Value) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintBelowZero) Resumable() bool { return true } + +func (u UintBelowZero) withContext(ctx string) error { + u.ctx = ctx + return u +} + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. +type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded + + ctx string +} + +// Error implements the error interface +func (t TypeError) Error() string { + out := fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) + if t.ctx != "" { + out += " at " + t.ctx + } + return out +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := sizes[lead].typ + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. +type InvalidPrefixError byte + +// Error implements the error interface +func (i InvalidPrefixError) Error() string { + return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) +} + +// Resumable returns 'false' for InvalidPrefixErrors +func (i InvalidPrefixError) Resumable() bool { return false } + +// ErrUnsupportedType is returned +// when a bad argument is supplied +// to a function that takes `interface{}`. 
+type ErrUnsupportedType struct { + T reflect.Type + + ctx string +} + +// Error implements error +func (e *ErrUnsupportedType) Error() string { + out := fmt.Sprintf("msgp: type %q not supported", e.T) + if e.ctx != "" { + out += " at " + e.ctx + } + return out +} + +// Resumable returns 'true' for ErrUnsupportedType +func (e *ErrUnsupportedType) Resumable() bool { return true } + +func (e *ErrUnsupportedType) withContext(ctx string) error { + o := *e + o.ctx = addCtx(o.ctx, ctx) + return &o +} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go new file mode 100644 index 00000000..b2e11085 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/extension.go @@ -0,0 +1,549 @@ +package msgp + +import ( + "fmt" + "math" +) + +const ( + // Complex64Extension is the extension number used for complex64 + Complex64Extension = 3 + + // Complex128Extension is the extension number used for complex128 + Complex128Extension = 4 + + // TimeExtension is the extension number used for time.Time + TimeExtension = 5 +) + +// our extensions live here +var extensionReg = make(map[int8]func() Extension) + +// RegisterExtension registers extensions so that they +// can be initialized and returned by methods that +// decode `interface{}` values. This should only +// be called during initialization. f() should return +// a newly-initialized zero value of the extension. Keep in +// mind that extensions 3, 4, and 5 are reserved for +// complex64, complex128, and time.Time, respectively, +// and that MessagePack reserves extension types from -127 to -1. +// +// For example, if you wanted to register a user-defined struct: +// +// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) +// +// RegisterExtension will panic if you call it multiple times +// with the same 'typ' argument, or if you use a reserved +// type (3, 4, or 5). +func RegisterExtension(typ int8, f func() Extension) { + switch typ { + case Complex64Extension, Complex128Extension, TimeExtension: + panic(fmt.Sprint("msgp: forbidden extension type:", typ)) + } + if _, ok := extensionReg[typ]; ok { + panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) + } + extensionReg[typ] = f +} + +// ExtensionTypeError is an error type returned +// when there is a mis-match between an extension type +// and the type encoded on the wire +type ExtensionTypeError struct { + Got int8 + Want int8 +} + +// Error implements the error interface +func (e ExtensionTypeError) Error() string { + return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) +} + +// Resumable returns 'true' for ExtensionTypeErrors +func (e ExtensionTypeError) Resumable() bool { return true } + +func errExt(got int8, wanted int8) error { + return ExtensionTypeError{Got: got, Want: wanted} +} + +// Extension is the interface fulfilled +// by types that want to define their +// own binary encoding. +type Extension interface { + // ExtensionType should return + // a int8 that identifies the concrete + // type of the extension. (Types <0 are + // officially reserved by the MessagePack + // specifications.) 
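
A sketch of a user-defined extension wired through RegisterExtension, AppendExtension and ReadExtensionBytes (all defined in this file, with the Extension interface's methods following below); the Flag type and its extension number 10 are hypothetical:

    package main

    import (
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    // Flag is a hypothetical one-byte extension payload.
    type Flag byte

    func (f *Flag) ExtensionType() int8            { return 10 } // user-chosen, outside the reserved range
    func (f *Flag) Len() int                       { return 1 }
    func (f *Flag) MarshalBinaryTo(b []byte) error { b[0] = byte(*f); return nil }
    func (f *Flag) UnmarshalBinary(b []byte) error { *f = Flag(b[0]); return nil }

    func main() {
        // Registration lets generic decoding paths construct a fresh *Flag for type 10.
        msgp.RegisterExtension(10, func() msgp.Extension { return new(Flag) })

        f := Flag(7)
        buf, _ := msgp.AppendExtension(nil, &f) // fixext1 header + 1-byte payload

        var out Flag
        if _, err := msgp.ReadExtensionBytes(buf, &out); err == nil {
            fmt.Println(out) // 7
        }
    }
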
+ ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + l := e.Len() + var err error + switch l { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(e.ExtensionType()) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(e.ExtensionType()) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(e.ExtensionType()) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(e.ExtensionType()) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(e.ExtensionType()) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(e.ExtensionType()) + default: + switch { + case l < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = byte(uint8(l)) + mw.buf[o+2] = byte(e.ExtensionType()) + case l < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(l)) + mw.buf[o+3] = byte(e.ExtensionType()) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(l)) + mw.buf[o+5] = byte(e.ExtensionType()) + } + } + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if l <= mw.bufsize() { + o, err := mw.require(l) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, l) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = l + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + p, err := m.R.Peek(2) + if err != nil { + return 0, err + } + spec := sizes[p[0]] + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, p[0]) + } + if spec.extra == 
constsize { + return int8(p[1]), nil + } + size := spec.size + p, err = m.R.Peek(int(size)) + if err != nil { + return 0, err + } + return int8(p[size-1]), nil +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := sizes[b[0]] + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. +func (m *Reader) ReadExtension(e Extension) (err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead := p[0] + var read int + var off int + switch lead { + case mfixext1: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(3) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(3) + } + return + + case mfixext2: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(4) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(4) + } + return + + case mfixext4: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(6) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(6) + } + return + + case mfixext8: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(10) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(10) + } + return + + case mfixext16: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(18) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(18) + } + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + if int8(p[2]) != e.ExtensionType() { + err = errExt(int8(p[2]), e.ExtensionType()) + return + } + read = int(uint8(p[1])) + off = 3 + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + if int8(p[3]) != e.ExtensionType() { + err = errExt(int8(p[3]), e.ExtensionType()) + return + } + read = int(big.Uint16(p[1:])) + off = 4 + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + if int8(p[5]) != e.ExtensionType() { + err = errExt(int8(p[5]), e.ExtensionType()) + return + } + read = int(big.Uint32(p[1:])) + off = 6 + + default: + err = badPrefix(ExtensionType, lead) + return + } + + p, err = m.R.Peek(read + off) + if err != nil { + return + } + err = e.UnmarshalBinary(p[off:]) + if err == nil { + _, err = m.R.Skip(read + off) + } + return +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = 
byte(e.ExtensionType()) + return o[:n+3], nil + case 1: + o, n = ensure(b, 3) + o[n] = mfixext1 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 2: + o, n = ensure(b, 4) + o[n] = mfixext2 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 4: + o, n = ensure(b, 6) + o[n] = mfixext4 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 8: + o, n = ensure(b, 10) + o[n] = mfixext8 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 16: + o, n = ensure(b, 18) + o[n] = mfixext16 + o[n+1] = byte(e.ExtensionType()) + n += 2 + default: + switch { + case l < math.MaxUint8: + o, n = ensure(b, l+3) + o[n] = mext8 + o[n+1] = byte(uint8(l)) + o[n+2] = byte(e.ExtensionType()) + n += 3 + case l < math.MaxUint16: + o, n = ensure(b, l+4) + o[n] = mext16 + big.PutUint16(o[n+1:], uint16(l)) + o[n+3] = byte(e.ExtensionType()) + n += 4 + default: + o, n = ensure(b, l+6) + o[n] = mext32 + big.PutUint32(o[n+1:], uint32(l)) + o[n+5] = byte(e.ExtensionType()) + n += 6 + } + } + return o, e.MarshalBinaryTo(o[n:]) +} + +// ReadExtensionBytes reads an extension from 'b' into 'e' +// and returns any remaining bytes. +// Possible errors: +// - ErrShortBytes ('b' not long enough) +// - ExtensionTypeError{} (wire type not the same as e.Type()) +// - TypeError{} (next object not an extension) +// - InvalidPrefixError +// - An umarshal error returned from e.UnmarshalBinary +func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { + l := len(b) + if l < 3 { + return b, ErrShortBytes + } + lead := b[0] + var ( + sz int // size of 'data' + off int // offset of 'data' + typ int8 + ) + switch lead { + case mfixext1: + typ = int8(b[1]) + sz = 1 + off = 2 + case mfixext2: + typ = int8(b[1]) + sz = 2 + off = 2 + case mfixext4: + typ = int8(b[1]) + sz = 4 + off = 2 + case mfixext8: + typ = int8(b[1]) + sz = 8 + off = 2 + case mfixext16: + typ = int8(b[1]) + sz = 16 + off = 2 + case mext8: + sz = int(uint8(b[1])) + typ = int8(b[2]) + off = 3 + if sz == 0 { + return b[3:], e.UnmarshalBinary(b[3:3]) + } + case mext16: + if l < 4 { + return b, ErrShortBytes + } + sz = int(big.Uint16(b[1:])) + typ = int8(b[3]) + off = 4 + case mext32: + if l < 6 { + return b, ErrShortBytes + } + sz = int(big.Uint32(b[1:])) + typ = int8(b[5]) + off = 6 + default: + return b, badPrefix(ExtensionType, lead) + } + + if typ != e.ExtensionType() { + return b, errExt(typ, e.ExtensionType()) + } + + // the data of the extension starts + // at 'off' and is 'sz' bytes long + if len(b[off:]) < sz { + return b, ErrShortBytes + } + tot := off + sz + return b[tot:], e.UnmarshalBinary(b[off:tot]) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go new file mode 100644 index 00000000..8e7370eb --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -0,0 +1,92 @@ +// +build linux darwin dragonfly freebsd netbsd openbsd +// +build !appengine + +package msgp + +import ( + "os" + "syscall" +) + +// ReadFile reads a file into 'dst' using +// a read-only memory mapping. Consequently, +// the file must be mmap-able, and the +// Unmarshaler should never write to +// the source memory. (Methods generated +// by the msgp tool obey that constraint, but +// user-defined implementations may not.) +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. 
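
A usage sketch under two assumptions: a Linux-like platform where the mmap path applies, and msgp.Raw — a pre-encoded fragment type assumed from elsewhere in the package — standing in for a code-generated type:

    package main

    import (
        "os"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        // msgp.Raw satisfies Marshaler/Sizer and Unmarshaler, so it fits both calls.
        data := msgp.Raw(msgp.AppendString(nil, "hello"))

        f, err := os.Create("/tmp/demo.msgp")
        if err != nil {
            panic(err)
        }
        if err := msgp.WriteFile(data, f); err != nil { // fallocate + writable mapping
            panic(err)
        }
        f.Close()

        f, _ = os.Open("/tmp/demo.msgp")
        var out msgp.Raw
        if err := msgp.ReadFile(&out, f); err != nil { // read-only mapping; never write through it
            panic(err)
        }
        f.Close()
    }
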
+// +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 00000000..6e654dbd --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,47 @@ +// +build windows appengine + +package msgp + +import ( + "io/ioutil" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 00000000..f817d775 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,174 @@ +package msgp + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? 
+ ---------------------------------- */ + +func putMint64(b []byte, i int64) { + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | + (int64(b[2]) << 40) | (int64(b[3]) << 32) | + (int64(b[4]) << 24) | (int64(b[5]) << 16) | + (int64(b[6]) << 8) | (int64(b[7])) + + nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + b[0] = byte(sec >> 56) + b[1] = byte(sec >> 48) + b[2] = byte(sec >> 40) + b[3] = byte(sec >> 32) + b[4] = byte(sec >> 24) + b[5] = byte(sec >> 16) + b[6] = byte(sec >> 8) + b[7] = byte(sec) + b[8] = byte(nsec >> 24) + b[9] = byte(nsec >> 16) + b[10] = byte(nsec >> 8) + b[11] = byte(nsec) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 
24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 00000000..0e11e603 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,568 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. 
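
CopyToJSON (defined just below) translates a MessagePack stream straight to JSON; a sketch, with the Append* builders again assumed from the package rather than shown in this hunk:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        // MessagePack for {"name": "anna", "age": 28}.
        raw := msgp.AppendMapHeader(nil, 2)
        raw = msgp.AppendString(raw, "name")
        raw = msgp.AppendString(raw, "anna")
        raw = msgp.AppendString(raw, "age")
        raw = msgp.AppendInt(raw, 28)

        // Translate directly, with no intermediate decode into Go values.
        var out bytes.Buffer
        if _, err := msgp.CopyToJSON(&out, bytes.NewReader(raw)); err != nil {
            panic(err)
        }
        fmt.Println(out.String()) // {"name":"anna","age":28}
    }
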
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { + var j jsWriter + var bf *bufio.Writer + if jsw, ok := w.(jsWriter); ok { + j = jsw + } else { + bf = bufio.NewWriter(w) + j = bf + } + var nn int + for err == nil { + nn, err = rwNext(j, r) + n += int64(nn) + } + if err != io.EOF { + if bf != nil { + bf.Flush() + } + return + } + err = nil + if bf != nil { + err = bf.Flush() + } + return +} + +func rwNext(w jsWriter, src *Reader) (int, error) { + t, err := src.NextType() + if err != nil { + return 0, err + } + return defuns[t](w, src) +} + +func rwMap(dst jsWriter, src *Reader) (n int, err error) { + var comma bool + var sz uint32 + var field []byte + + sz, err = src.ReadMapHeader() + if err != nil { + return + } + + if sz == 0 { + return dst.WriteString("{}") + } + + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + var nn int + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + + field, err = src.ReadMapKeyPtr() + if err != nil { + return + } + nn, err = rwquoted(dst, field) + n += nn + if err != nil { + return + } + + err = dst.WriteByte(':') + if err != nil { + return + } + n++ + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + if !comma { + comma = true + } + } + + err = dst.WriteByte('}') + if err != nil { + return + } + n++ + return +} + +func rwArray(dst jsWriter, src *Reader) (n int, err error) { + err = dst.WriteByte('[') + if err != nil { + return + } + var sz uint32 + var nn int + sz, err = src.ReadArrayHeader() + if err != nil { + return + } + comma := false + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + comma = true + } + + err = dst.WriteByte(']') + if err != nil { + return + } + n++ + return +} + +func rwNil(dst jsWriter, src *Reader) (int, error) { + err := src.ReadNil() + if err != nil { + return 0, err + } + return dst.Write(null) +} + +func rwFloat32(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat32() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32) + return dst.Write(src.scratch) +} + +func rwFloat64(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64) + return dst.Write(src.scratch) +} + +func rwInt(dst jsWriter, src *Reader) (int, error) { + i, err := src.ReadInt64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) + return dst.Write(src.scratch) +} + +func rwUint(dst jsWriter, src *Reader) (int, error) { + u, err := src.ReadUint64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) + return dst.Write(src.scratch) +} + +func rwBool(dst jsWriter, src *Reader) (int, error) { + b, err := src.ReadBool() + if err != nil { + return 0, err + } + if b { + return dst.WriteString("true") + } + return dst.WriteString("false") +} + +func rwTime(dst jsWriter, src *Reader) (int, error) { + t, err := src.ReadTime() + if err != nil { + return 0, err + } + bts, err := t.MarshalJSON() + if err != nil { + return 0, err + } + return dst.Write(bts) +} + +func rwExtension(dst jsWriter, src *Reader) (n int, err error) { + et, err := src.peekExtensionType() + if err != nil { + return 0, err + } + + // registered extensions can 
override + // the JSON encoding + if j, ok := extensionReg[et]; ok { + var bts []byte + e := j() + err = src.ReadExtension(e) + if err != nil { + return + } + bts, err = json.Marshal(e) + if err != nil { + return + } + return dst.Write(bts) + } + + e := RawExtension{} + e.Type = et + err = src.ReadExtension(&e) + if err != nil { + return + } + + var nn int + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + + nn, err = dst.WriteString(`"type":`) + n += nn + if err != nil { + return + } + + src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) + nn, err = dst.Write(src.scratch) + n += nn + if err != nil { + return + } + + nn, err = dst.WriteString(`,"data":"`) + n += nn + if err != nil { + return + } + + enc := base64.NewEncoder(base64.StdEncoding, dst) + + nn, err = enc.Write(e.Data) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + nn, err = dst.WriteString(`"}`) + n += nn + return +} + +func rwString(dst jsWriter, src *Reader) (n int, err error) { + var p []byte + p, err = src.R.Peek(1) + if err != nil { + return + } + lead := p[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + src.R.Skip(1) + goto write + } + + switch lead { + case mstr8: + p, err = src.R.Next(2) + if err != nil { + return + } + read = int(uint8(p[1])) + case mstr16: + p, err = src.R.Next(3) + if err != nil { + return + } + read = int(big.Uint16(p[1:])) + case mstr32: + p, err = src.R.Next(5) + if err != nil { + return + } + read = int(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +write: + p, err = src.R.Next(read) + if err != nil { + return + } + n, err = rwquoted(dst, p) + return +} + +func rwBytes(dst jsWriter, src *Reader) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + src.scratch, err = src.ReadBytes(src.scratch[:0]) + if err != nil { + return + } + enc := base64.NewEncoder(base64.StdEncoding, dst) + nn, err = enc.Write(src.scratch) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} + +// Below (c) The Go Authors, 2009-2014 +// Subject to the BSD-style license found at http://golang.org +// +// see: encoding/json/encode.go:(*encodeState).stringbytes() +func rwquoted(dst jsWriter, s []byte) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + switch b { + case '\\', '"': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte(b) + if err != nil { + return + } + n++ + case '\n': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('n') + if err != nil { + return + } + n++ + case '\r': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('r') + if err != nil { + return + } + n++ + case '\t': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('t') + if err != nil { + return + } + n++ + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + nn, err = dst.WriteString(`\u00`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[b>>4]) + if err != nil { + return + } + n++ + err = dst.WriteByte(hex[b&0xF]) + if err != nil { + return + } + n++ + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + i += size + start = i + continue + } + i += size + } + if start < len(s) { + nn, err = dst.Write(s[start:]) + n += nn + if err != nil { + return + } + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go new file mode 100644 index 00000000..438caf53 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -0,0 +1,363 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "time" +) + +var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) + +func init() { + + // NOTE(pmh): this is best expressed as a jump table, + // but gc doesn't do that yet. revisit post-go1.5. + unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ + StrType: rwStringBytes, + BinType: rwBytesBytes, + MapType: rwMapBytes, + ArrayType: rwArrayBytes, + Float64Type: rwFloat64Bytes, + Float32Type: rwFloat32Bytes, + BoolType: rwBoolBytes, + IntType: rwIntBytes, + UintType: rwUintBytes, + NilType: rwNullBytes, + ExtensionType: rwExtensionBytes, + Complex64Type: rwExtensionBytes, + Complex128Type: rwExtensionBytes, + TimeType: rwTimeBytes, + } +} + +// UnmarshalAsJSON takes raw messagepack and writes +// it as JSON to 'w'. If an error is returned, the +// bytes not translated will also be returned. If +// no errors are encountered, the length of the returned +// slice will be zero. 
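A matching sketch for the byte-slice path; the Append* builders are assumed from the package's write_bytes.go:

	raw := msgp.AppendMapHeader(nil, 1) // encode {"ok": true}
	raw = msgp.AppendString(raw, "ok")
	raw = msgp.AppendBool(raw, true)

	var buf bytes.Buffer
	left, err := msgp.UnmarshalAsJSON(&buf, raw)
	fmt.Println(buf.String(), len(left), err) // {"ok":true} 0 <nil>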
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + b, msg, err := 
ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + var sz int + if f64 { + sz = 64 + f, msg, err = ReadFloat64Bytes(msg) + } else { + sz = 32 + var v float32 + v, msg, err = ReadFloat32Bytes(msg) + f = float64(v) + } + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = 
w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 00000000..ad07ef99 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,267 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. 
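A short sketch exercising the tagged-union semantics documented above; every method used here appears in this file:

	var n msgp.Number      // the zero value reports as Int(0)
	fmt.Println(n.Int())   // 0 true
	n.AsFloat64(2.5)
	fmt.Println(n.Float()) // 2.5 true
	fmt.Println(n.Type())  // float64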
+func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } 
+} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go new file mode 100644 index 00000000..c828f7ec --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/purego.go @@ -0,0 +1,15 @@ +// +build purego appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 00000000..fe2de9e0 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1363 @@ +package msgp + +import ( + "io" + "math" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returning any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// NewReaderBuf returns a *Reader with a provided buffer. +func NewReaderBuf(r io.Reader, buf []byte) *Reader { + return &Reader{R: fwd.NewReaderBuf(r, buf)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. 
+ // The Reader itself + // is stateless; all the + // buffering is done + // within R. + R *fwd.Reader + scratch []byte +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// CopyNext reads the next object from m without decoding it and writes it to w. +// It avoids unnecessary copies internally. +func (m *Reader) CopyNext(w io.Writer) (int64, error) { + sz, o, err := getNextSize(m.R) + if err != nil { + return 0, err + } + + var n int64 + // Opportunistic optimization: if we can fit the whole thing in the m.R + // buffer, then just get a pointer to that, and pass it to w.Write, + // avoiding an allocation. + if int(sz) <= m.R.BufferSize() { + var nn int + var buf []byte + buf, err = m.R.Next(int(sz)) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + return 0, err + } + nn, err = w.Write(buf) + n += int64(nn) + } else { + // Fall back to io.CopyN. + // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) + n, err = io.CopyN(w, m.R, int64(sz)) + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + } + if err != nil { + return n, err + } else if n < int64(sz) { + return n, io.ErrShortWrite + } + + // for maps and slices, read elements + for x := uintptr(0); x < o; x++ { + var n2 int64 + n2, err = m.CopyNext(w) + if err != nil { + return n, err + } + n += n2 + } + return n, nil +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. +func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + p, err := m.R.Peek(1) + if err != nil { + return InvalidType, err + } + t := getType(p[0]) + if t == InvalidType { + return t, InvalidPrefixError(p[0]) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.Peek(1) + return err == nil && p[0] == mnil +} + +// getNextSize returns the size of the next object on the wire. +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// for maps and arrays, obj size does not include elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. 
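The size table consulted by getNextSize is what lets Skip and CopyNext step over whole objects without decoding them. A sketch (AppendInt64 appears in number.go above; AppendString is assumed from write_bytes.go):

	raw := msgp.AppendInt64(nil, 7)      // first object: 7
	raw = msgp.AppendString(raw, "next") // second object: "next"
	r := msgp.NewReader(bytes.NewReader(raw))
	if err := r.Skip(); err != nil { // steps over the int without decoding it
		panic(err)
	}
	s, _ := r.ReadString()
	fmt.Println(s) // next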
+func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + b, err := r.Peek(1) + if err != nil { + return 0, 0, err + } + lead := b[0] + spec := &sizes[lead] + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err = r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. +func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes read. +// It will return a TypeError{} if the next +// object is not a map. +func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// ReadMapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. 
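Because the returned slice aliases the Reader's internal buffer, a caller that keeps the key must copy it before the next read; a sketch of that pattern:

	func collectKey(rd *msgp.Reader, keys []string) ([]string, error) {
		key, err := rd.ReadMapKeyPtr()
		if err != nil {
			return keys, err
		}
		// string(key) copies; key is invalidated by the next rd method call
		return append(keys, string(key)), nil
	}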
+func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + p, err := m.R.Peek(1) + if err != nil { + return nil, err + } + lead := p[0] + var read int + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. +func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + var lead byte + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.Peek(1) + if err != nil { + return err + } + if p[0] != mnil { + return badPrefix(NilType, p[0]) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) 
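The up-cast noted above means a float32 on the wire decodes losslessly through ReadFloat64 (AppendFloat32 appears in number.go above):

	raw := msgp.AppendFloat32(nil, 1.5) // 1.5 is exactly representable
	r := msgp.NewReader(bytes.NewReader(raw))
	f, err := r.ReadFloat64()
	fmt.Println(f, err) // 1.5 <nil>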
+func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a conversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p[0]) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u := getMuint64(p) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if 
smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + v := int64(getMint8(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = uint64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + v := int64(getMint16(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + v := int64(getMint32(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + v := int64(getMint64(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. 
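The scratch argument lets a caller amortize allocations across many reads; a sketch, where handle is a hypothetical callback:

	func drainBins(r *msgp.Reader, handle func([]byte)) error {
		var scratch []byte
		for {
			b, err := r.ReadBytes(scratch)
			if err != nil {
				return err // io.EOF once the stream is exhausted
			}
			handle(b)
			scratch = b // reuse the (possibly grown) buffer next time
		}
	}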
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. +func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. 
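The header/payload split supports streaming large strings without buffering them whole; the application-specific part might look like this sketch:

	func copyStringPayload(r *msgp.Reader, w io.Writer) error {
		sz, err := r.ReadStringHeader()
		if err != nil {
			return err
		}
		_, err = io.CopyN(w, r, int64(sz)) // consume exactly sz payload bytes
		return err
	}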
+func (m *Reader) ReadStringHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead := p[0] + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var p []byte + var lead byte + var read int64 + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. + // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) 
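A sketch of decoding a one-entry map this way (Append helpers assumed from write_bytes.go); per ReadIntf below, integers surface as int64:

	raw := msgp.AppendMapHeader(nil, 1)
	raw = msgp.AppendString(raw, "k")
	raw = msgp.AppendInt64(raw, 9)

	mp := map[string]interface{}{}
	if err := msgp.NewReader(bytes.NewReader(raw)).ReadMapStrIntf(mp); err != nil {
		panic(err)
	}
	fmt.Println(mp["k"].(int64)) // 9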
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return +} + +// ReadIntf reads out the next object as a raw interface{}. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. +func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 00000000..f6674507 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1237 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "math" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// InvalidType. 
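NextType makes it possible to dispatch on raw bytes without consuming them; a sketch using ReadMapHeaderBytes, ReadNilBytes, and Skip from this file:

	func dispatch(raw []byte) ([]byte, error) {
		switch msgp.NextType(raw) {
		case msgp.MapType:
			_, rest, err := msgp.ReadMapHeaderBytes(raw)
			return rest, err // the key/value pairs follow in rest
		case msgp.NilType:
			return msgp.ReadNilBytes(raw)
		default:
			return msgp.Skip(raw) // step over anything unhandled
		}
	}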
+func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := sizes[b[0]] + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements msgp.Marshaler. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements msgp.Unmarshaler. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. +func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if IsNil(b[:rlen]) { + rlen = 0 + } + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements msgp.Encodable. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements msgp.Decodable. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + err := appendNext(f, (*[]byte)(r)) + if IsNil(*r) { + *r = (*r)[:0] + } + return err +} + +// Msgsize implements msgp.Sizer +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements json.Marshaler +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b[1:] + return + } + + switch lead { + case mmap16: + if l < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case mmap32: + if l < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, x, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, x, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b[1:] + return + } + + switch lead { + case marray16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case marray32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadBytesHeader reads the 'bin' header size +// off of 'b' and returns the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a bin object) +func ReadBytesHeader(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + switch b[0] { + case mbin8: + if len(b) < 2 { + err = ErrShortBytes + return + } + sz = uint32(b[1]) + o = b[2:] + return + case mbin16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + case mbin32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + default: + err = badPrefix(BinType, b[0]) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'nil') +// - InvalidPrefixError +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a float64)
+func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) {
+	if len(b) < 9 {
+		if len(b) >= 5 && b[0] == mfloat32 {
+			var tf float32
+			tf, o, err = ReadFloat32Bytes(b)
+			f = float64(tf)
+			return
+		}
+		err = ErrShortBytes
+		return
+	}
+
+	if b[0] != mfloat64 {
+		if b[0] == mfloat32 {
+			var tf float32
+			tf, o, err = ReadFloat32Bytes(b)
+			f = float64(tf)
+			return
+		}
+		err = badPrefix(Float64Type, b[0])
+		return
+	}
+
+	f = math.Float64frombits(getMuint64(b))
+	o = b[9:]
+	return
+}
+
+// ReadFloat32Bytes tries to read a float32
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a float32)
+func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) {
+	if len(b) < 5 {
+		err = ErrShortBytes
+		return
+	}
+
+	if b[0] != mfloat32 {
+		err = TypeError{Method: Float32Type, Encoded: getType(b[0])}
+		return
+	}
+
+	f = math.Float32frombits(getMuint32(b))
+	o = b[5:]
+	return
+}
+
+// ReadBoolBytes tries to read a bool
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a bool)
+func ReadBoolBytes(b []byte) (bool, []byte, error) {
+	if len(b) < 1 {
+		return false, b, ErrShortBytes
+	}
+	switch b[0] {
+	case mtrue:
+		return true, b[1:], nil
+	case mfalse:
+		return false, b[1:], nil
+	default:
+		return false, b, badPrefix(BoolType, b[0])
+	}
+}
+
+// ReadInt64Bytes tries to read an int64
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError (not an int)
+func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return 0, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	if isfixint(lead) {
+		i = int64(rfixint(lead))
+		o = b[1:]
+		return
+	}
+	if isnfixint(lead) {
+		i = int64(rnfixint(lead))
+		o = b[1:]
+		return
+	}
+
+	switch lead {
+	case mint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint8(b))
+		o = b[2:]
+		return
+
+	case muint8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint8(b))
+		o = b[2:]
+		return
+
+	case mint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint16(b))
+		o = b[3:]
+		return
+
+	case muint16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint16(b))
+		o = b[3:]
+		return
+
+	case mint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint32(b))
+		o = b[5:]
+		return
+
+	case muint32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMuint32(b))
+		o = b[5:]
+		return
+
+	case mint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		i = int64(getMint64(b))
+		o = b[9:]
+		return
+
+	case muint64:
+		if l < 9 {
+			err = ErrShortBytes
+			return
+		}
+		u := getMuint64(b)
+		if u > math.MaxInt64 {
+			err = UintOverflow{Value: u, FailedBitsize: 64}
+			return
+		}
+		i = int64(u)
+		o = b[9:]
+		return
+
+	default:
+		err = badPrefix(IntType, lead)
+		return
+	}
+}
+
+// ReadInt32Bytes tries to read an int32
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int32)
+func ReadInt32Bytes(b []byte) (int32, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt32 || i < math.MinInt32 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 32}
+	}
+	return int32(i), o, err
+}
+
+// ReadInt16Bytes tries to read an int16
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int16)
+func ReadInt16Bytes(b []byte) (int16, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt16 || i < math.MinInt16 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 16}
+	}
+	return int16(i), o, err
+}
+
+// ReadInt8Bytes tries to read an int8
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int8)
+func ReadInt8Bytes(b []byte) (int8, []byte, error) {
+	i, o, err := ReadInt64Bytes(b)
+	if i > math.MaxInt8 || i < math.MinInt8 {
+		return 0, o, IntOverflow{Value: i, FailedBitsize: 8}
+	}
+	return int8(i), o, err
+}
+
+// ReadIntBytes tries to read an int
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only)
+func ReadIntBytes(b []byte) (int, []byte, error) {
+	if smallint {
+		i, b, err := ReadInt32Bytes(b)
+		return int(i), b, err
+	}
+	i, b, err := ReadInt64Bytes(b)
+	return int(i), b, err
+}
+
+// ReadUint64Bytes tries to read a uint64
+// from 'b' and return the value and the remaining bytes.
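The sized readers layer range checks over `ReadInt64Bytes`, so a caller can tell a type mismatch (`TypeError`) apart from a value that is merely too large (`IntOverflow`). A sketch of the overflow path (it assumes, as the rest of this package suggests, that `IntOverflow` implements `error`):

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	b := msgp.AppendInt64(nil, 1000) // hits the int16 encoding on the wire

	// 1000 does not fit in an int8, so the sized reader reports overflow.
	if _, _, err := msgp.ReadInt8Bytes(b); err != nil {
		if ov, ok := err.(msgp.IntOverflow); ok {
			fmt.Println("overflow at", ov.FailedBitsize, "bits") // overflow at 8 bits
		}
	}

	// The full-width reader accepts the same bytes.
	i, _, _ := msgp.ReadInt64Bytes(b)
	fmt.Println(i) // 1000
}
```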
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + v := int64(getMint8(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[2:] + return + + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + u = uint64(getMuint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + v := int64(getMint16(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[3:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + u = uint64(getMuint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + v := int64(getMint32(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[5:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + u = uint64(getMuint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + v := int64(getMint64(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[9:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u = getMuint64(b) + o = b[9:] + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. 
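Note the asymmetry built into `ReadUint64Bytes`: non-negative signed encodings are accepted, while negative values surface as `UintBelowZero` rather than a generic prefix error. A sketch (again assuming the error struct implements `error`):

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	b := msgp.AppendInt64(nil, -3) // encodes as a negative fixint

	if _, _, err := msgp.ReadUint64Bytes(b); err != nil {
		if bz, ok := err.(msgp.UintBelowZero); ok {
			fmt.Println("negative value:", bz.Value) // negative value: -3
		}
	}
}
```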
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+// - UintOverflow{} (value too large for uint; 32-bit platforms only)
+func ReadUintBytes(b []byte) (uint, []byte, error) {
+	if smallint {
+		u, b, err := ReadUint32Bytes(b)
+		return uint(u), b, err
+	}
+	u, b, err := ReadUint64Bytes(b)
+	return uint(u), b, err
+}
+
+// ReadByteBytes is analogous to ReadUint8Bytes
+func ReadByteBytes(b []byte) (byte, []byte, error) {
+	return ReadUint8Bytes(b)
+}
+
+// ReadBytesBytes reads a 'bin' object
+// from 'b' and returns its value and
+// the remaining bytes in 'b'.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a 'bin' object)
+func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+	return readBytesBytes(b, scratch, false)
+}
+
+func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		return nil, nil, ErrShortBytes
+	}
+
+	lead := b[0]
+	var read int
+	switch lead {
+	case mbin8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+
+		read = int(b[1])
+		b = b[2:]
+
+	case mbin16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		read = int(big.Uint16(b[1:]))
+		b = b[3:]
+
+	case mbin32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		read = int(big.Uint32(b[1:]))
+		b = b[5:]
+
+	default:
+		err = badPrefix(BinType, lead)
+		return
+	}
+
+	if len(b) < read {
+		err = ErrShortBytes
+		return
+	}
+
+	// zero-copy
+	if zc {
+		v = b[0:read]
+		o = b[read:]
+		return
+	}
+
+	if cap(scratch) >= read {
+		v = scratch[0:read]
+	} else {
+		v = make([]byte, read)
+	}
+
+	o = b[copy(v, b):]
+	return
+}
+
+// ReadBytesZC extracts the messagepack-encoded
+// binary field without copying. The returned []byte
+// points to the same memory as the input slice.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (object not 'bin')
+func ReadBytesZC(b []byte) (v []byte, o []byte, err error) {
+	return readBytesBytes(b, nil, true)
+}
+
+// ReadExactBytes reads a 'bin' object from 'b',
+// copies its contents into 'into', and returns
+// the remaining bytes. It errors with ArrayError
+// if the encoded payload is not exactly len(into) bytes long.
+func ReadExactBytes(b []byte, into []byte) (o []byte, err error) {
+	l := len(b)
+	if l < 1 {
+		err = ErrShortBytes
+		return
+	}
+
+	lead := b[0]
+	var read uint32
+	var skip int
+	switch lead {
+	case mbin8:
+		if l < 2 {
+			err = ErrShortBytes
+			return
+		}
+
+		read = uint32(b[1])
+		skip = 2
+
+	case mbin16:
+		if l < 3 {
+			err = ErrShortBytes
+			return
+		}
+		read = uint32(big.Uint16(b[1:]))
+		skip = 3
+
+	case mbin32:
+		if l < 5 {
+			err = ErrShortBytes
+			return
+		}
+		read = uint32(big.Uint32(b[1:]))
+		skip = 5
+
+	default:
+		err = badPrefix(BinType, lead)
+		return
+	}
+
+	if read != uint32(len(into)) {
+		err = ArrayError{Wanted: uint32(len(into)), Got: read}
+		return
+	}
+
+	o = b[skip+copy(into, b[skip:]):]
+	return
+}
+
+// ReadStringZC reads a messagepack string field
+// without copying. The returned []byte points
+// to the same memory as the input slice.
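The `ZC` variants trade safety for speed: the returned slice aliases the input buffer, whereas the `scratch`-based readers copy. A sketch of the difference:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	b := msgp.AppendBytes(nil, []byte("payload"))

	// Zero-copy: v aliases b, so mutating b is visible through v.
	v, _, _ := msgp.ReadBytesZC(b)
	b[len(b)-1] = 'D'
	fmt.Println(string(v)) // payloaD

	// Copying variant: the result is independent of the source buffer.
	c, _, _ := msgp.ReadBytesBytes(msgp.AppendBytes(nil, []byte("payload")), nil)
	fmt.Println(string(c)) // payload
}
```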
+// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + b = b[1:] + } else { + switch lead { + case mstr8: + if l < 2 { + err = ErrShortBytes + return + } + read = int(b[1]) + b = b[2:] + + case mstr16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mstr32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b.'' +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex128) +// - InvalidPrefixError +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. 
+// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 15 { + err = ErrShortBytes + return + } + if b[0] != mext8 || b[1] != 12 { + err = badPrefix(TimeType, b[0]) + return + } + if int8(b[2]) != TimeExtension { + err = errExt(int8(b[2]), TimeExtension) + return + } + sec, nsec := getUnix(b[3:]) + t = time.Unix(sec, int64(nsec)).Local() + o = b[15:] + return +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. +func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = ReadIntfBytes(o) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = ReadMapStrIntfBytes(b, nil) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = ReadIntfBytes(o) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. 
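`ReadIntfBytes` is the schema-less entry point: it dispatches on `NextType` and hands back builtin Go types. Paired with `AppendIntf` from write_bytes.go below, it round-trips arbitrary values. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	raw, err := msgp.AppendIntf(nil, map[string]interface{}{"n": int64(7)})
	if err != nil {
		panic(err)
	}

	v, _, err := msgp.ReadIntfBytes(raw)
	if err != nil {
		panic(err)
	}
	m := v.(map[string]interface{}) // maps decode as map[string]interface{}
	fmt.Println(m["n"])             // 7
}
```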
+// Possible Errors: +// - ErrShortBytes (not enough bytes in b) +// - InvalidPrefixError (bad encoding) +func Skip(b []byte) ([]byte, error) { + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = Skip(b) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := &sizes[lead] // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 00000000..ce2f8b16 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,38 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 00000000..d9fb3535 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,36 @@ +// +build !purego,!appengine + +package msgp + +import ( + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. 
+func UnsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// UnsafeBytes returns the string as a byte slice +// +// Deprecated: +// Since this code is no longer used by the code generator, +// UnsafeBytes(s) is precisely equivalent to []byte(s) +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 00000000..407ec1f8 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,861 @@ +package msgp + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "time" +) + +const ( + // min buffer size for the writer + minWriterSize = 18 +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. 
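In normal use, the buffering is the main thing to remember: nothing reaches the underlying `io.Writer` until `Flush`. A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)

	// This lands in the Writer's internal buffer...
	if err := w.WriteString("hello"); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 0: nothing written through yet

	// ...and only reaches buf on Flush.
	if err := w.Flush(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 6: fixstr prefix + 5 payload bytes
}
```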
+func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 'minWriterSize' + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < minWriterSize { + sz = minWriterSize + } + buf := make([]byte, sz) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a writer with a provided buffer. +// 'buf' is not used when the capacity is smaller than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, minWriterSize) + } + buf = buf[:cap(buf)] + return &Writer{ + w: w, + buf: buf, + } +} + +// Encode encodes an Encodable to an io.Writer. +func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. 
+func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to 
the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. 
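`WriteBytesHeader` exists so that a large payload can be streamed after its header instead of being materialized as one slice; since `Writer` itself implements `io.Writer`, `io.Copy` works for the body. A sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	payload := bytes.Repeat([]byte{0xAB}, 1<<10)

	var out bytes.Buffer
	w := msgp.NewWriter(&out)

	// Emit the 'bin' header, then stream exactly that many body bytes.
	if err := w.WriteBytesHeader(uint32(len(payload))); err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, bytes.NewReader(payload)); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}
	fmt.Println(out.Len()) // 1027: 3-byte bin16 header + 1024 body bytes
}
```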
+func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteIntf writes the concrete type of 'v'. 
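Since the encoding keeps only Unix seconds plus nanoseconds, the zone is dropped but the instant survives a round trip exactly. A sketch pairing the equivalent `AppendTime` helper from write_bytes.go with `ReadTimeBytes`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	now := time.Now()

	b := msgp.AppendTime(nil, now) // same 15-byte ext8 layout as WriteTime

	got, _, err := msgp.ReadTimeBytes(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Equal(now)) // true: the instant is preserved
}
```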
+// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return fmt.Errorf("msgp: type %s not supported", val) + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{T: val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Type().Key().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeStruct(v reflect.Value) error { + if enc, ok := v.Interface().(Encodable); ok { + return enc.EncodeMsg(mw) + } + return fmt.Errorf("msgp: unsupported type: %s", v.Type()) +} + +func (mw *Writer) writeVal(v reflect.Value) error { + if !isSupported(v.Kind()) { + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) + } + + // shortcut for nil values + if v.IsNil() { + return mw.WriteNil() + } + switch v.Kind() { + case reflect.Bool: + return mw.WriteBool(v.Bool()) + + case reflect.Float32, reflect.Float64: + return mw.WriteFloat64(v.Float()) + + case reflect.Complex64, reflect.Complex128: + return mw.WriteComplex128(v.Complex()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: + return mw.WriteInt64(v.Int()) + 
+ case reflect.Interface, reflect.Ptr: + if v.IsNil() { + mw.WriteNil() + } + return mw.writeVal(v.Elem()) + + case reflect.Map: + return mw.writeMap(v) + + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: + return mw.WriteUint64(v.Uint()) + + case reflect.String: + return mw.WriteString(v.String()) + + case reflect.Slice, reflect.Array: + return mw.writeSlice(v) + + case reflect.Struct: + return mw.writeStruct(v) + + } + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) +} + +// is the reflect.Kind encodable? +func isSupported(k reflect.Kind) bool { + switch k { + case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: + return false + default: + return true + } +} + +// GuessSize guesses the size of the underlying +// value of 'i'. If the underlying value is not +// a simple builtin (or []byte), GuessSize defaults +// to 512. +func GuessSize(i interface{}) int { + if i == nil { + return NilSize + } + + switch i := i.(type) { + case Sizer: + return i.Msgsize() + case Extension: + return ExtensionPrefixSize + i.Len() + case float64: + return Float64Size + case float32: + return Float32Size + case uint8, uint16, uint32, uint64, uint: + return UintSize + case int8, int16, int32, int64, int: + return IntSize + case []byte: + return BytesPrefixSize + len(i) + case string: + return StringPrefixSize + len(i) + case complex64: + return Complex64Size + case complex128: + return Complex128Size + case bool: + return BoolSize + case map[string]interface{}: + s := MapHeaderSize + for key, val := range i { + s += StringPrefixSize + len(key) + GuessSize(val) + } + return s + case map[string]string: + s := MapHeaderSize + for key, val := range i { + s += 2*StringPrefixSize + len(key) + len(val) + } + return s + default: + return 512 + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go new file mode 100644 index 00000000..93d6d764 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -0,0 +1,431 @@ +package msgp + +import ( + "math" + "reflect" + "time" +) + +// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) +func ensure(b []byte, sz int) ([]byte, int) { + l := len(b) + c := cap(b) + if c-l < sz { + o := make([]byte, (2*c)+sz) // exponential growth + n := copy(o, b) + return o[:n+sz], n + } + return b[:l+sz], l +} + +// AppendMapHeader appends a map header with the +// given size to the slice +func AppendMapHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixmap(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], mmap16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], mmap32, sz) + return o + } +} + +// AppendArrayHeader appends an array header with +// the given size to the slice +func AppendArrayHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixarray(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], marray16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], marray32, sz) + return o + } +} + +// AppendNil appends a 'nil' byte to the slice +func AppendNil(b []byte) []byte { return append(b, mnil) } + +// AppendFloat64 appends a float64 to the slice +func AppendFloat64(b []byte, f float64) []byte { + o, n := ensure(b, Float64Size) + prefixu64(o[n:], mfloat64, math.Float64bits(f)) + return o +} + +// AppendFloat32 appends a float32 to the slice 
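`GuessSize` is meant for pre-sizing buffers ahead of the `Append*` calls that follow: for the builtin types it knows, the estimate is generous, and it falls back to a flat 512 otherwise. A sketch:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	v := map[string]interface{}{"name": "gopher", "age": int64(13)}

	est := msgp.GuessSize(v)
	buf := make([]byte, 0, est) // pre-size from the estimate
	buf, err := msgp.AppendIntf(buf, v)
	if err != nil {
		panic(err)
	}
	// The estimate comfortably covers the bytes actually written,
	// so no reallocation was needed while appending.
	fmt.Println(len(buf), "bytes written, estimate was", est)
}
```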
+func AppendFloat32(b []byte, f float32) []byte { + o, n := ensure(b, Float32Size) + prefixu32(o[n:], mfloat32, math.Float32bits(f)) + return o +} + +// AppendInt64 appends an int64 to the slice +func AppendInt64(b []byte, i int64) []byte { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return append(b, wfixint(uint8(i))) + case i <= math.MaxInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i <= math.MaxInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } + } + switch { + case i >= -32: + return append(b, wnfixint(int8(i))) + case i >= math.MinInt8: + o, n := ensure(b, 2) + putMint8(o[n:], int8(i)) + return o + case i >= math.MinInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i >= math.MinInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } +} + +// AppendInt appends an int to the slice +func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt8 appends an int8 to the slice +func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt16 appends an int16 to the slice +func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt32 appends an int32 to the slice +func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } + +// AppendUint64 appends a uint64 to the slice +func AppendUint64(b []byte, u uint64) []byte { + switch { + case u <= (1<<7)-1: + return append(b, wfixint(uint8(u))) + + case u <= math.MaxUint8: + o, n := ensure(b, 2) + putMuint8(o[n:], uint8(u)) + return o + + case u <= math.MaxUint16: + o, n := ensure(b, 3) + putMuint16(o[n:], uint16(u)) + return o + + case u <= math.MaxUint32: + o, n := ensure(b, 5) + putMuint32(o[n:], uint32(u)) + return o + + default: + o, n := ensure(b, 9) + putMuint64(o[n:], u) + return o + + } +} + +// AppendUint appends a uint to the slice +func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint8 appends a uint8 to the slice +func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } + +// AppendByte is analogous to AppendUint8 +func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } + +// AppendUint16 appends a uint16 to the slice +func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint32 appends a uint32 to the slice +func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } + +// AppendBytes appends bytes to the slice as MessagePack 'bin' data +func AppendBytes(b []byte, bts []byte) []byte { + sz := len(bts) + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mbin8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mbin16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mbin32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], bts)] +} + +// AppendBytesHeader appends an 'bin' header with +// the given size to the slice. 
+func AppendBytesHeader(b []byte, sz uint32) []byte { + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2) + prefixu8(o[n:], mbin8, uint8(sz)) + return o + case sz <= math.MaxUint16: + o, n = ensure(b, 3) + prefixu16(o[n:], mbin16, uint16(sz)) + return o + } + o, n = ensure(b, 5) + prefixu32(o[n:], mbin32, sz) + return o +} + +// AppendBool appends a bool to the slice +func AppendBool(b []byte, t bool) []byte { + if t { + return append(b, mtrue) + } + return append(b, mfalse) +} + +// AppendString appends a string as a MessagePack 'str' to the slice +func AppendString(b []byte, s string) []byte { + sz := len(s) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], s)] +} + +// AppendStringFromBytes appends a []byte +// as a MessagePack 'str' to the slice 'b.' +func AppendStringFromBytes(b []byte, str []byte) []byte { + sz := len(str) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], str)] +} + +// AppendComplex64 appends a complex64 to the slice as a MessagePack extension +func AppendComplex64(b []byte, c complex64) []byte { + o, n := ensure(b, Complex64Size) + o[n] = mfixext8 + o[n+1] = Complex64Extension + big.PutUint32(o[n+2:], math.Float32bits(real(c))) + big.PutUint32(o[n+6:], math.Float32bits(imag(c))) + return o +} + +// AppendComplex128 appends a complex128 to the slice as a MessagePack extension +func AppendComplex128(b []byte, c complex128) []byte { + o, n := ensure(b, Complex128Size) + o[n] = mfixext16 + o[n+1] = Complex128Extension + big.PutUint64(o[n+2:], math.Float64bits(real(c))) + big.PutUint64(o[n+10:], math.Float64bits(imag(c))) + return o +} + +// AppendTime appends a time.Time to the slice as a MessagePack extension +func AppendTime(b []byte, t time.Time) []byte { + o, n := ensure(b, TimeSize) + t = t.UTC() + o[n] = mext8 + o[n+1] = 12 + o[n+2] = TimeExtension + putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) + return o +} + +// AppendMapStrStr appends a map[string]string to the slice +// as a MessagePack map with 'str'-type keys and values +func AppendMapStrStr(b []byte, m map[string]string) []byte { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + for key, val := range m { + b = AppendString(b, key) + b = AppendString(b, val) + } + return b +} + +// AppendMapStrIntf appends a map[string]interface{} to the slice +// as a MessagePack map with 'str'-type keys. +func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + var err error + for key, val := range m { + b = AppendString(b, key) + b, err = AppendIntf(b, val) + if err != nil { + return b, err + } + } + return b, nil +} + +// AppendIntf appends the concrete type of 'i' to the +// provided []byte. 
'i' must be one of the following:
+// - 'nil'
+// - A bool, float, string, []byte, int, uint, or complex
+// - A map[string]interface{} or map[string]string
+// - A []T, where T is another supported type
+// - A *T, where T is another supported type
+// - A type that satisfies the msgp.Marshaler interface
+// - A type that satisfies the msgp.Extension interface
+func AppendIntf(b []byte, i interface{}) ([]byte, error) {
+	if i == nil {
+		return AppendNil(b), nil
+	}
+
+	// all the concrete types
+	// for which we have methods
+	switch i := i.(type) {
+	case Marshaler:
+		return i.MarshalMsg(b)
+	case Extension:
+		return AppendExtension(b, i)
+	case bool:
+		return AppendBool(b, i), nil
+	case float32:
+		return AppendFloat32(b, i), nil
+	case float64:
+		return AppendFloat64(b, i), nil
+	case complex64:
+		return AppendComplex64(b, i), nil
+	case complex128:
+		return AppendComplex128(b, i), nil
+	case string:
+		return AppendString(b, i), nil
+	case []byte:
+		return AppendBytes(b, i), nil
+	case int8:
+		return AppendInt8(b, i), nil
+	case int16:
+		return AppendInt16(b, i), nil
+	case int32:
+		return AppendInt32(b, i), nil
+	case int64:
+		return AppendInt64(b, i), nil
+	case int:
+		return AppendInt64(b, int64(i)), nil
+	case uint:
+		return AppendUint64(b, uint64(i)), nil
+	case uint8:
+		return AppendUint8(b, i), nil
+	case uint16:
+		return AppendUint16(b, i), nil
+	case uint32:
+		return AppendUint32(b, i), nil
+	case uint64:
+		return AppendUint64(b, i), nil
+	case time.Time:
+		return AppendTime(b, i), nil
+	case map[string]interface{}:
+		return AppendMapStrIntf(b, i)
+	case map[string]string:
+		return AppendMapStrStr(b, i), nil
+	case []interface{}:
+		b = AppendArrayHeader(b, uint32(len(i)))
+		var err error
+		for _, k := range i {
+			b, err = AppendIntf(b, k)
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	}
+
+	var err error
+	v := reflect.ValueOf(i)
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice:
+		l := v.Len()
+		b = AppendArrayHeader(b, uint32(l))
+		for i := 0; i < l; i++ {
+			b, err = AppendIntf(b, v.Index(i).Interface())
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	case reflect.Ptr:
+		if v.IsNil() {
+			return AppendNil(b), err
+		}
+		b, err = AppendIntf(b, v.Elem().Interface())
+		return b, err
+	default:
+		return b, &ErrUnsupportedType{T: v.Type()}
+	}
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc
new file mode 100644
index 00000000..8b7f044a
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml
new file mode 100644
index 00000000..e2ce06c4
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml
@@ -0,0 +1,20 @@
+sudo: false
+language: go
+
+go:
+  - 1.15.x
+  - 1.16.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+env:
+  - GO111MODULE=on
+
+go_import_path: github.com/vmihailenco/msgpack
+
+before_install:
+  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go
+    env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md
new file mode 100644
index 00000000..f6b19d5b
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md
@@ -0,0 +1,51 @@
+## 
[5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22)
+
+
+
+## v5
+
+### Added
+
+- `DecodeMap` is split into `DecodeMap`, `DecodeTypedMap`, and `DecodeUntypedMap`.
+- New msgpack extensions API.
+
+### Changed
+
+- `Reset*` functions also reset flags.
+- `SetMapDecodeFunc` is renamed to `SetMapDecoder`.
+- `StructAsArray` is renamed to `UseArrayEncodedStructs`.
+- `SortMapKeys` is renamed to `SetSortMapKeys`.
+
+### Removed
+
+- `UseJSONTag` is removed. Use `SetCustomStructTag("json")` instead.
+
+## v4
+
+- Encode, Decode, Marshal, and Unmarshal are changed to accept a single argument. EncodeMulti and
+  DecodeMulti are added as replacements.
+- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64.
+- The Encoder was changed to preserve the type of numbers instead of choosing the most compact
+  encoding. The old behavior can be achieved with Encoder.UseCompactEncoding.
+
+## v3.3
+
+- The `msgpack:",inline"` tag is restored to force inlining of structs.
+
+## v3.2
+
+- Decoding extension types returns a pointer to the value instead of the value. Fixes #153
+
+## v3
+
+- gopkg.in is not supported any more. Update the import path to github.com/vmihailenco/msgpack.
+- Msgpack maps are decoded into map[string]interface{} by default.
+- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of
+  DecodeArrayLen.
+- Embedded structs are automatically inlined where possible.
+- Time is encoded using an extension as described in https://github.com/msgpack/msgpack/pull/209.
+  The old format is supported as well.
+- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint.
+  There should be no performance differences.
+- DecodeInterface can now return int8/16/32 and uint8/16/32.
+- PeekCode returns codes.Code instead of byte.
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/LICENSE b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE
new file mode 100644
index 00000000..b749d070
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
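The v5 migration note above ("Use `SetCustomStructTag("json")` instead") looks like this in practice; the `User` type here is purely illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

type User struct {
	Name string `json:"name"`
}

func main() {
	var buf bytes.Buffer

	enc := msgpack.NewEncoder(&buf)
	enc.SetCustomStructTag("json") // v5 replacement for the removed UseJSONTag
	if err := enc.Encode(&User{Name: "gopher"}); err != nil {
		panic(err)
	}

	dec := msgpack.NewDecoder(&buf)
	dec.SetCustomStructTag("json")
	var u User
	if err := dec.Decode(&u); err != nil {
		panic(err)
	}
	fmt.Println(u.Name) // gopher
}
```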
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/Makefile b/vendor/github.com/vmihailenco/msgpack/v5/Makefile
new file mode 100644
index 00000000..e9aade78
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/Makefile
@@ -0,0 +1,6 @@
+test:
+	go test ./...
+	go test ./... -short -race
+	go test ./... -run=NONE -bench=. -benchmem
+	env GOOS=linux GOARCH=386 go test ./...
+	go vet
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/README.md b/vendor/github.com/vmihailenco/msgpack/v5/README.md
new file mode 100644
index 00000000..66ad98b9
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/README.md
@@ -0,0 +1,86 @@
+# MessagePack encoding for Golang
+
+[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg)](https://travis-ci.org/vmihailenco/msgpack)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/vmihailenco/msgpack/v5)](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5)
+[![Documentation](https://img.shields.io/badge/msgpack-documentation-informational)](https://msgpack.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> :heart:
+> [**Uptrace.dev** - All-in-one tool to optimize performance and monitor errors & logs](https://uptrace.dev/?utm_source=gh-msgpack&utm_campaign=gh-msgpack-var2)
+
+- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+- [Documentation](https://msgpack.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5)
+- [Examples](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#pkg-examples)
+
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
+## Features
+
+- Primitives, arrays, maps, structs, time.Time and interface{}.
+- Appengine \*datastore.Key and datastore.Cursor.
+- [CustomEncoder]/[CustomDecoder] interfaces for custom encoding.
+- [Extensions](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-RegisterExt) to encode
+  type information.
+- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`.
+- Omitting individual empty fields via `msgpack:",omitempty"` tag or all
+  [empty fields in a struct](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-OmitEmpty).
+- [Map keys sorting](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetSortMapKeys).
+- Encoding/decoding all
+  [structs as arrays](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.UseArrayEncodedStructs)
+  or
+  [individual structs](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-AsArray).
+- [Encoder.SetCustomStructTag] with [Decoder.SetCustomStructTag] can turn msgpack into a drop-in
+  replacement for any tag.
+- Simple but very fast and efficient
+  [queries](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Decoder.Query).
+
+[customencoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomEncoder
+[customdecoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomDecoder
+[encoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetCustomStructTag
+[decoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Decoder.SetCustomStructTag
+
+## Installation
+
+msgpack supports the last 2 Go versions and requires support for
+[Go modules](https://github.com/golang/go/wiki/Modules). So make sure to initialize a Go module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install msgpack/v5 (note _v5_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/vmihailenco/msgpack/v5
+```
+
+## Quickstart
+
+```go
+import (
+	"fmt"
+
+	"github.com/vmihailenco/msgpack/v5"
+)
+
+func ExampleMarshal() {
+	type Item struct {
+		Foo string
+	}
+
+	b, err := msgpack.Marshal(&Item{Foo: "bar"})
+	if err != nil {
+		panic(err)
+	}
+
+	var item Item
+	err = msgpack.Unmarshal(b, &item)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(item.Foo)
+	// Output: bar
+}
+```
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js
new file mode 100644
index 00000000..4fedde6d
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js
@@ -0,0 +1 @@
+module.exports = { extends: ['@commitlint/config-conventional'] }
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode.go b/vendor/github.com/vmihailenco/msgpack/v5/decode.go
new file mode 100644
index 00000000..5df40e5d
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/decode.go
@@ -0,0 +1,663 @@
+package msgpack
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+const (
+	looseInterfaceDecodingFlag uint32 = 1 << iota
+	disallowUnknownFieldsFlag
+)
+
+const (
+	bytesAllocLimit = 1e6 // 1mb
+	sliceAllocLimit = 1e4
+	maxMapSize      = 1e6
+)
+
+type bufReader interface {
+	io.Reader
+	io.ByteScanner
+}
+
+//------------------------------------------------------------------------------
+
+var decPool = sync.Pool{
+	New: func() interface{} {
+		return NewDecoder(nil)
+	},
+}
+
+func GetDecoder() *Decoder {
+	return decPool.Get().(*Decoder)
+}
+
+func PutDecoder(dec *Decoder) {
+	dec.r = nil
+	dec.s = nil
+	decPool.Put(dec)
+}
+
+//------------------------------------------------------------------------------
+
+// Unmarshal decodes the MessagePack-encoded data and stores the result
+// in the value pointed to by v.
+func Unmarshal(data []byte, v interface{}) error {
+	dec := GetDecoder()
+
+	dec.Reset(bytes.NewReader(data))
+	err := dec.Decode(v)
+
+	PutDecoder(dec)
+
+	return err
+}
+
+// A Decoder reads and decodes MessagePack values from an input stream.
+type Decoder struct {
+	r   io.Reader
+	s   io.ByteScanner
+	buf []byte
+
+	rec []byte // accumulates read data if not nil
+
+	dict       []string
+	flags      uint32
+	structTag  string
+	mapDecoder func(*Decoder) (interface{}, error)
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read data from r
+// beyond the requested msgpack values. Buffering can be disabled
+// by passing a reader that implements the io.ByteScanner interface.
+func NewDecoder(r io.Reader) *Decoder {
+	d := new(Decoder)
+	d.Reset(r)
+	return d
+}
+
+// Reset discards any buffered data, resets all state, and switches the buffered
+// reader to read from r.
+func (d *Decoder) Reset(r io.Reader) {
+	d.ResetDict(r, nil)
+}
+
+// ResetDict is like Reset, but also resets the dict.
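+//
+// A hedged usage sketch (the reader and dict contents are illustrative): the
+// decoder's dict must mirror the one the encoder side was reset with for
+// interned strings to resolve:
+//
+//	dec := msgpack.NewDecoder(nil)
+//	dec.ResetDict(bytes.NewReader(data), []string{"id", "name"})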
+func (d *Decoder) ResetDict(r io.Reader, dict []string) { + d.resetReader(r) + d.flags = 0 + d.structTag = "" + d.mapDecoder = nil + d.dict = dict +} + +func (d *Decoder) WithDict(dict []string, fn func(*Decoder) error) error { + oldDict := d.dict + d.dict = dict + err := fn(d) + d.dict = oldDict + return err +} + +func (d *Decoder) resetReader(r io.Reader) { + if br, ok := r.(bufReader); ok { + d.r = br + d.s = br + } else { + br := bufio.NewReader(r) + d.r = br + d.s = br + } +} + +func (d *Decoder) SetMapDecoder(fn func(*Decoder) (interface{}, error)) { + d.mapDecoder = fn +} + +// UseLooseInterfaceDecoding causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseLooseInterfaceDecoding(on bool) { + if on { + d.flags |= looseInterfaceDecodingFlag + } else { + d.flags &= ^looseInterfaceDecodingFlag + } +} + +// SetCustomStructTag causes the decoder to use the supplied tag as a fallback option +// if there is no msgpack tag. +func (d *Decoder) SetCustomStructTag(tag string) { + d.structTag = tag +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields(on bool) { + if on { + d.flags |= disallowUnknownFieldsFlag + } else { + d.flags &= ^disallowUnknownFieldsFlag + } +} + +// UseInternedStrings enables support for decoding interned strings. +func (d *Decoder) UseInternedStrings(on bool) { + if on { + d.flags |= useInternedStringsFlag + } else { + d.flags &= ^useInternedStringsFlag + } +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. +// The reader is valid until the next call to Decode. 
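+//
+// Sketch (assuming dec has already decoded one value from a stream): the
+// unconsumed buffered bytes can be drained without touching the underlying
+// reader:
+//
+//	rest, err := io.ReadAll(dec.Buffered())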
+func (d *Decoder) Buffered() io.Reader { + return d.r +} + +//nolint:gocyclo +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %T)", v) + } + if vv.IsNil() { + return fmt.Errorf("msgpack: Decode(non-settable %T)", v) + } + + vv = vv.Elem() + if vv.Kind() == reflect.Interface { + if !vv.IsNil() { + vv = vv.Elem() + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %s)", vv.Type().String()) + } + } + } + + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.flags&looseInterfaceDecodingFlag != 0 { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != msgpcode.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c byte) (bool, error) { + if c == msgpcode.Nil { + return false, nil + } + if c == msgpcode.False { + return false, nil + } + if c == msgpcode.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +func (d *Decoder) DecodeDuration() 
(time.Duration, error) {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return 0, err
+	}
+	return time.Duration(n), nil
+}
+
+// DecodeInterface decodes a value into interface{}. It returns the following types:
+// - nil,
+// - bool,
+// - int8, int16, int32, int64,
+// - uint8, uint16, uint32, uint64,
+// - float32 and float64,
+// - string,
+// - []byte,
+// - slices of any of the above,
+// - maps of any of the above.
+//
+// DecodeInterface should be used only when you don't know the type of the value
+// you are decoding. For example, if you are decoding a number it is better to use
+// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers.
+func (d *Decoder) DecodeInterface() (interface{}, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return nil, err
+	}
+
+	if msgpcode.IsFixedNum(c) {
+		return int8(c), nil
+	}
+	if msgpcode.IsFixedMap(c) {
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	}
+	if msgpcode.IsFixedArray(c) {
+		return d.decodeSlice(c)
+	}
+	if msgpcode.IsFixedString(c) {
+		return d.string(c)
+	}
+
+	switch c {
+	case msgpcode.Nil:
+		return nil, nil
+	case msgpcode.False, msgpcode.True:
+		return d.bool(c)
+	case msgpcode.Float:
+		return d.float32(c)
+	case msgpcode.Double:
+		return d.float64(c)
+	case msgpcode.Uint8:
+		return d.uint8()
+	case msgpcode.Uint16:
+		return d.uint16()
+	case msgpcode.Uint32:
+		return d.uint32()
+	case msgpcode.Uint64:
+		return d.uint64()
+	case msgpcode.Int8:
+		return d.int8()
+	case msgpcode.Int16:
+		return d.int16()
+	case msgpcode.Int32:
+		return d.int32()
+	case msgpcode.Int64:
+		return d.int64()
+	case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:
+		return d.bytes(c, nil)
+	case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32:
+		return d.string(c)
+	case msgpcode.Array16, msgpcode.Array32:
+		return d.decodeSlice(c)
+	case msgpcode.Map16, msgpcode.Map32:
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,
+		msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:
+		return d.decodeInterfaceExt(c)
+	}
+
+	return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c)
+}
+
+// DecodeInterfaceLoose is like DecodeInterface except that:
+// - int8, int16, and int32 are converted to int64,
+// - uint8, uint16, and uint32 are converted to uint64,
+// - float32 is converted to float64.
+// - []byte is converted to string.
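+//
+// Illustrative sketch: the same encoded value 1 comes back as int8(1) from
+// DecodeInterface, but as int64(1) here:
+//
+//	v, err := dec.DecodeInterfaceLoose() // v is int64(1), not int8(1)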
+func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return nil, err
+	}
+
+	if msgpcode.IsFixedNum(c) {
+		return int64(int8(c)), nil
+	}
+	if msgpcode.IsFixedMap(c) {
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	}
+	if msgpcode.IsFixedArray(c) {
+		return d.decodeSlice(c)
+	}
+	if msgpcode.IsFixedString(c) {
+		return d.string(c)
+	}
+
+	switch c {
+	case msgpcode.Nil:
+		return nil, nil
+	case msgpcode.False, msgpcode.True:
+		return d.bool(c)
+	case msgpcode.Float, msgpcode.Double:
+		return d.float64(c)
+	case msgpcode.Uint8, msgpcode.Uint16, msgpcode.Uint32, msgpcode.Uint64:
+		return d.uint(c)
+	case msgpcode.Int8, msgpcode.Int16, msgpcode.Int32, msgpcode.Int64:
+		return d.int(c)
+	case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32,
+		msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:
+		return d.string(c)
+	case msgpcode.Array16, msgpcode.Array32:
+		return d.decodeSlice(c)
+	case msgpcode.Map16, msgpcode.Map32:
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,
+		msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:
+		return d.decodeInterfaceExt(c)
+	}
+
+	return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c)
+}
+
+// Skip skips the next value.
+func (d *Decoder) Skip() error {
+	c, err := d.readCode()
+	if err != nil {
+		return err
+	}
+
+	if msgpcode.IsFixedNum(c) {
+		return nil
+	}
+	if msgpcode.IsFixedMap(c) {
+		return d.skipMap(c)
+	}
+	if msgpcode.IsFixedArray(c) {
+		return d.skipSlice(c)
+	}
+	if msgpcode.IsFixedString(c) {
+		return d.skipBytes(c)
+	}
+
+	switch c {
+	case msgpcode.Nil, msgpcode.False, msgpcode.True:
+		return nil
+	case msgpcode.Uint8, msgpcode.Int8:
+		return d.skipN(1)
+	case msgpcode.Uint16, msgpcode.Int16:
+		return d.skipN(2)
+	case msgpcode.Uint32, msgpcode.Int32, msgpcode.Float:
+		return d.skipN(4)
+	case msgpcode.Uint64, msgpcode.Int64, msgpcode.Double:
+		return d.skipN(8)
+	case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:
+		return d.skipBytes(c)
+	case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32:
+		return d.skipBytes(c)
+	case msgpcode.Array16, msgpcode.Array32:
+		return d.skipSlice(c)
+	case msgpcode.Map16, msgpcode.Map32:
+		return d.skipMap(c)
+	case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,
+		msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:
+		return d.skipExt(c)
+	}
+
+	return fmt.Errorf("msgpack: unknown code %x", c)
+}
+
+func (d *Decoder) DecodeRaw() (RawMessage, error) {
+	d.rec = make([]byte, 0)
+	if err := d.Skip(); err != nil {
+		return nil, err
+	}
+	msg := RawMessage(d.rec)
+	d.rec = nil
+	return msg, nil
+}
+
+// PeekCode returns the next MessagePack code without advancing the reader.
+// Subpackage msgpcode defines the list of available codes.
+func (d *Decoder) PeekCode() (byte, error) {
+	c, err := d.s.ReadByte()
+	if err != nil {
+		return 0, err
+	}
+	return c, d.s.UnreadByte()
+}
+
+// ReadFull reads exactly len(buf) bytes into buf.
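+//
+// Sketch (the length is illustrative):
+//
+//	buf := make([]byte, 16)
+//	err := dec.ReadFull(buf) // fails unless exactly 16 bytes are available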
+func (d *Decoder) ReadFull(buf []byte) error { + _, err := readN(d.r, buf, len(buf)) + return err +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == msgpcode.Nil +} + +func (d *Decoder) readCode() (byte, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return c, nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + var err error + d.buf, err = readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + if d.rec != nil { + // TODO: read directly into d.rec? + d.rec = append(d.rec, d.buf...) + } + return d.buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := min(n-len(b), bytesAllocLimit) + b = append(b, make([]byte, alloc)...) + + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return b, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go new file mode 100644 index 00000000..52e0526c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go @@ -0,0 +1,339 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var errArrayStruct = errors.New("msgpack: number of fields in array-encoded struct has changed") + +var ( + mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) + mapStringStringType = mapStringStringPtrType.Elem() +) + +var ( + mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) + mapStringInterfaceType = mapStringInterfacePtrType.Elem() +) + +func decodeMapValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if n == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if n == 0 { + return nil + } + + return d.decodeTypedMapValue(v, n) +} + +func (d *Decoder) decodeMapDefault() (interface{}, error) { + if d.mapDecoder != nil { + return d.mapDecoder(d) + } + return d.DecodeMap() +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. 
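+//
+// Sketch: callers typically branch on the nil case before reading pairs:
+//
+//	n, err := dec.DecodeMapLen()
+//	if err == nil && n == -1 {
+//		// the encoded value was nil, not an empty map
+//	}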
+func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if msgpcode.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + if c >= msgpcode.FixedMapLow && c <= msgpcode.FixedMapHigh { + return int(c & msgpcode.FixedMapMask), nil + } + if c == msgpcode.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == msgpcode.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, unexpectedCodeError{code: c, hint: "map length"} +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, maxMapSize)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + m, err := d.DecodeMap() + if err != nil { + return err + } + *ptr = m + return nil +} + +func (d *Decoder) DecodeMap() (map[string]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[string]interface{}, min(n, maxMapSize)) + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + + return m, nil +} + +func (d *Decoder) DecodeUntypedMap() (map[interface{}]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[interface{}]interface{}, min(n, maxMapSize)) + + for i := 0; i < n; i++ { + mk, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + m[mk] = mv + } + + return m, nil +} + +// DecodeTypedMap decodes a typed map. Typed map is a map that has a fixed type for keys and values. +// Key and value types may be different. 
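+//
+// Hedged sketch: the concrete key and value types are inferred from the first
+// decoded pair, so a map written as map[string]string comes back as:
+//
+//	v, err := dec.DecodeTypedMap()
+//	m, ok := v.(map[string]string)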
+func (d *Decoder) DecodeTypedMap() (interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if n <= 0 { + return nil, nil + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + if !keyType.Comparable() { + return nil, fmt.Errorf("msgpack: unsupported map key: %s", keyType.String()) + } + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + + n-- + if err := d.decodeTypedMapValue(mapValue, n); err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeTypedMapValue(v reflect.Value, n int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < n; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +func (d *Decoder) skipMap(c byte) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.mapLen(c) + if err == nil { + return d.decodeStruct(v, n) + } + + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return err + } + + if n <= 0 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + if n != len(fields.List) { + return errArrayStruct + } + + for _, f := range fields.List { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeStruct(v reflect.Value, n int) error { + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + for i := 0; i < n; i++ { + name, err := d.decodeStringTemp() + if err != nil { + return err + } + + if f := fields.Map[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + continue + } + + if d.flags&disallowUnknownFieldsFlag != 0 { + return fmt.Errorf("msgpack: unknown field %q", name) + } + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go new file mode 100644 index 00000000..45d6a741 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go @@ -0,0 +1,295 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return c, nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return 
int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c byte) (uint64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return uint64(n), err + case msgpcode.Int8: + n, err := d.int8() + return uint64(n), err + case msgpcode.Uint16: + n, err := d.uint16() + return uint64(n), err + case msgpcode.Int16: + n, err := d.int16() + return uint64(n), err + case msgpcode.Uint32: + n, err := d.uint32() + return uint64(n), err + case msgpcode.Int32: + n, err := d.int32() + return uint64(n), err + case msgpcode.Uint64, msgpcode.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. +func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c byte) (int64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return int64(n), err + case msgpcode.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case msgpcode.Uint16: + n, err := d.uint16() + return int64(n), err + case msgpcode.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case msgpcode.Uint32: + n, err := d.uint32() + return int64(n), err + case msgpcode.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case msgpcode.Uint64, msgpcode.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c byte) (float32, error) { + if c == msgpcode.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. 
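+//
+// Sketch: integer codes are accepted as well and widened, so this also works
+// for a value written with EncodeInt:
+//
+//	f, err := dec.DecodeFloat64()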
+func (d *Decoder) DecodeFloat64() (float64, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.float64(c)
+}
+
+func (d *Decoder) float64(c byte) (float64, error) {
+	switch c {
+	case msgpcode.Float:
+		n, err := d.float32(c)
+		if err != nil {
+			return 0, err
+		}
+		return float64(n), nil
+	case msgpcode.Double:
+		n, err := d.uint64()
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(n), nil
+	}
+
+	n, err := d.int(c)
+	if err != nil {
+		return 0, fmt.Errorf("msgpack: invalid code=%x decoding float64", c)
+	}
+	return float64(n), nil
+}
+
+func (d *Decoder) DecodeUint() (uint, error) {
+	n, err := d.DecodeUint64()
+	return uint(n), err
+}
+
+func (d *Decoder) DecodeUint8() (uint8, error) {
+	n, err := d.DecodeUint64()
+	return uint8(n), err
+}
+
+func (d *Decoder) DecodeUint16() (uint16, error) {
+	n, err := d.DecodeUint64()
+	return uint16(n), err
+}
+
+func (d *Decoder) DecodeUint32() (uint32, error) {
+	n, err := d.DecodeUint64()
+	return uint32(n), err
+}
+
+func (d *Decoder) DecodeInt() (int, error) {
+	n, err := d.DecodeInt64()
+	return int(n), err
+}
+
+func (d *Decoder) DecodeInt8() (int8, error) {
+	n, err := d.DecodeInt64()
+	return int8(n), err
+}
+
+func (d *Decoder) DecodeInt16() (int16, error) {
+	n, err := d.DecodeInt64()
+	return int16(n), err
+}
+
+func (d *Decoder) DecodeInt32() (int32, error) {
+	n, err := d.DecodeInt64()
+	return int32(n), err
+}
+
+func decodeFloat32Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat32()
+	if err != nil {
+		return err
+	}
+	v.SetFloat(float64(f))
+	return nil
+}
+
+func decodeFloat64Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat64()
+	if err != nil {
+		return err
+	}
+	v.SetFloat(f)
+	return nil
+}
+
+func decodeInt64Value(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return err
+	}
+	v.SetInt(n)
+	return nil
+}
+
+func decodeUint64Value(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeUint64()
+	if err != nil {
+		return err
+	}
+	v.SetUint(n)
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go
new file mode 100644
index 00000000..c302ed1f
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go
@@ -0,0 +1,158 @@
+package msgpack
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+type queryResult struct {
+	query       string
+	key         string
+	hasAsterisk bool
+
+	values []interface{}
+}
+
+func (q *queryResult) nextKey() {
+	ind := strings.IndexByte(q.query, '.')
+	if ind == -1 {
+		q.key = q.query
+		q.query = ""
+		return
+	}
+	q.key = q.query[:ind]
+	q.query = q.query[ind+1:]
+}
+
+// Query extracts the data specified by the query from the msgpack stream,
+// skipping any other data. A query consists of map keys and array indexes
+// separated with a dot, e.g. key1.0.key2.
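+//
+// Hedged sketch (the document shape is assumed): collect every title from an
+// encoded {"posts": [{"title": ...}, ...]} value, using "*" to match all
+// array elements:
+//
+//	values, err := dec.Query("posts.*.title")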
+func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == msgpcode.Map16 || code == msgpcode.Map32 || msgpcode.IsFixedMap(code): + err = d.queryMapKey(q) + case code == msgpcode.Array16 || code == msgpcode.Array32 || msgpcode.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + key, err := d.decodeStringTemp() + if err != nil { + return err + } + + if key == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go new file mode 100644 index 00000000..db6f7c54 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go @@ -0,0 +1,191 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. 
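+//
+// Sketch: as with DecodeMapLen, -1 distinguishes a nil value from an empty
+// array:
+//
+//	n, err := dec.DecodeArrayLen()
+//	if err == nil && n == -1 {
+//		// the encoded value was nil
+//	}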
+func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } else if c >= msgpcode.FixedArrayLow && c <= msgpcode.FixedArrayHigh { + return int(c & msgpcode.FixedArrayMask), nil + } + switch c { + case msgpcode.Array16: + n, err := d.uint16() + return int(n), err + case msgpcode.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := makeStrings(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func makeStrings(s []string, n int) []string { + if n > sliceAllocLimit { + n = sliceAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceAllocLimit { + diff = sliceAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c byte) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c byte) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go new file mode 100644 index 00000000..e837e08b --- /dev/null +++ 
b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go @@ -0,0 +1,192 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) bytesLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + + if msgpcode.IsFixedString(c) { + return int(c & msgpcode.FixedStrMask), nil + } + + switch c { + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + return int(n), err + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + return int(n), err + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + return int(n), err + } + + return 0, fmt.Errorf("msgpack: invalid code=%x decoding string/bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c byte) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + return d.stringWithLen(n) +} + +func (d *Decoder) stringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c byte, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) decodeStringTemp() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + + b, err := d.readN(n) + if err != nil { + return "", err + } + + return bytesToString(b), nil +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c byte, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n <= 0 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go new file mode 100644 index 00000000..d2ff2aea --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go @@ -0,0 +1,250 @@ +package msgpack + +import ( + "encoding" + "errors" + "fmt" + "reflect" +) + +var ( + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() +) + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeDecMap.Load(typ.Elem()); ok { + return ptrValueDecoder(typ) + } + } + + if typ.Implements(customDecoderType) { + return nilAwareDecoder(typ, decodeCustomValue) + } + if typ.Implements(unmarshalerType) { + return nilAwareDecoder(typ, unmarshalValue) + } + if typ.Implements(binaryUnmarshalerType) { + return nilAwareDecoder(typ, unmarshalBinaryValue) + } + if typ.Implements(textUnmarshalerType) { + return nilAwareDecoder(typ, unmarshalTextValue) + } + + // Addressable struct field value. 
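+	// When only *T implements one of the interfaces, the value can still be
+	// decoded through its address: addrDecoder (defined below) calls v.Addr()
+	// before dispatching to the wrapped decoder.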
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return addrDecoder(nilAwareDecoder(typ, decodeCustomValue)) + } + if ptr.Implements(unmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalValue)) + } + if ptr.Implements(binaryUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalBinaryValue)) + } + if ptr.Implements(textUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalTextValue)) + } + } + + switch kind { + case reflect.Ptr: + return ptrValueDecoder(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + + return valueDecoders[kind] +} + +func ptrValueDecoder(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func addrDecoder(fn decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return fn(d, v.Addr()) + } +} + +func nilAwareDecoder(typ reflect.Type, fn decoderFunc) decoderFunc { + if nilable(typ.Kind()) { + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return fn(d, v) + } + } + + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + return fn(d, v) + } +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + return d.DecodeValue(v.Elem()) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} + +//------------------------------------------------------------------------------ + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + var b []byte + + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + b = d.rec + d.rec = nil + + unmarshaler := v.Interface().(Unmarshaler) + return unmarshaler.UnmarshalMsgpack(b) +} + +func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) + return 
unmarshaler.UnmarshalBinary(data) +} + +func unmarshalTextValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.TextUnmarshaler) + return unmarshaler.UnmarshalText(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode.go b/vendor/github.com/vmihailenco/msgpack/v5/encode.go new file mode 100644 index 00000000..0ef6212e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode.go @@ -0,0 +1,269 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + sortMapKeysFlag uint32 = 1 << iota + arrayEncodedStructsFlag + useCompactIntsFlag + useCompactFloatsFlag + useInternedStringsFlag + omitEmptyFlag +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +type byteWriter struct { + io.Writer +} + +func newByteWriter(w io.Writer) byteWriter { + return byteWriter{ + Writer: w, + } +} + +func (bw byteWriter) WriteByte(c byte) error { + _, err := bw.Write([]byte{c}) + return err +} + +//------------------------------------------------------------------------------ + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +func GetEncoder() *Encoder { + return encPool.Get().(*Encoder) +} + +func PutEncoder(enc *Encoder) { + enc.w = nil + encPool.Put(enc) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + enc := GetEncoder() + + var buf bytes.Buffer + enc.Reset(&buf) + + err := enc.Encode(v) + b := buf.Bytes() + + PutEncoder(enc) + + if err != nil { + return nil, err + } + return b, err +} + +type Encoder struct { + w writer + + buf []byte + timeBuf []byte + + dict map[string]int + + flags uint32 + structTag string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + buf: make([]byte, 9), + } + e.Reset(w) + return e +} + +// Writer returns the Encoder's writer. +func (e *Encoder) Writer() io.Writer { + return e.w +} + +// Reset discards any buffered data, resets all state, and switches the writer to write to w. +func (e *Encoder) Reset(w io.Writer) { + e.ResetDict(w, nil) +} + +// ResetDict is like Reset, but also resets the dict. +func (e *Encoder) ResetDict(w io.Writer, dict map[string]int) { + e.resetWriter(w) + e.flags = 0 + e.structTag = "" + e.dict = dict +} + +func (e *Encoder) WithDict(dict map[string]int, fn func(*Encoder) error) error { + oldDict := e.dict + e.dict = dict + err := fn(e) + e.dict = oldDict + return err +} + +func (e *Encoder) resetWriter(w io.Writer) { + if bw, ok := w.(writer); ok { + e.w = bw + } else { + e.w = newByteWriter(w) + } +} + +// SetSortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SetSortMapKeys(on bool) *Encoder { + if on { + e.flags |= sortMapKeysFlag + } else { + e.flags &= ^sortMapKeysFlag + } + return e +} + +// SetCustomStructTag causes the Encoder to use a custom struct tag as +// fallback option if there is no msgpack tag. +func (e *Encoder) SetCustomStructTag(tag string) { + e.structTag = tag +} + +// SetOmitEmpty causes the Encoder to omit empty values by default. 
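+//
+// Hedged sketch: with the flag on, zero-valued fields are dropped as if every
+// field carried the ",omitempty" tag:
+//
+//	enc.SetOmitEmpty(true)
+//	err := enc.Encode(struct{ Name string }{}) // encodes as an empty map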
+func (e *Encoder) SetOmitEmpty(on bool) {
+	if on {
+		e.flags |= omitEmptyFlag
+	} else {
+		e.flags &= ^omitEmptyFlag
+	}
+}
+
+// UseArrayEncodedStructs causes the Encoder to encode Go structs as msgpack arrays.
+func (e *Encoder) UseArrayEncodedStructs(on bool) {
+	if on {
+		e.flags |= arrayEncodedStructsFlag
+	} else {
+		e.flags &= ^arrayEncodedStructsFlag
+	}
+}
+
+// UseCompactInts causes the Encoder to choose the most compact encoding.
+// For example, it allows encoding a small Go int64 as a msgpack int8, saving 7 bytes.
+func (e *Encoder) UseCompactInts(on bool) {
+	if on {
+		e.flags |= useCompactIntsFlag
+	} else {
+		e.flags &= ^useCompactIntsFlag
+	}
+}
+
+// UseCompactFloats causes the Encoder to choose a compact integer encoding
+// for floats that can be represented as integers.
+func (e *Encoder) UseCompactFloats(on bool) {
+	if on {
+		e.flags |= useCompactFloatsFlag
+	} else {
+		e.flags &= ^useCompactFloatsFlag
+	}
+}
+
+// UseInternedStrings causes the Encoder to intern strings.
+func (e *Encoder) UseInternedStrings(on bool) {
+	if on {
+		e.flags |= useInternedStringsFlag
+	} else {
+		e.flags &= ^useInternedStringsFlag
+	}
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return e.EncodeNil()
+	case string:
+		return e.EncodeString(v)
+	case []byte:
+		return e.EncodeBytes(v)
+	case int:
+		return e.EncodeInt(int64(v))
+	case int64:
+		return e.encodeInt64Cond(v)
+	case uint:
+		return e.EncodeUint(uint64(v))
+	case uint64:
+		return e.encodeUint64Cond(v)
+	case bool:
+		return e.EncodeBool(v)
+	case float32:
+		return e.EncodeFloat32(v)
+	case float64:
+		return e.EncodeFloat64(v)
+	case time.Duration:
+		return e.encodeInt64Cond(int64(v))
+	case time.Time:
+		return e.EncodeTime(v)
+	}
+	return e.EncodeValue(reflect.ValueOf(v))
+}
+
+func (e *Encoder) EncodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := e.Encode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Encoder) EncodeValue(v reflect.Value) error {
+	fn := getEncoder(v.Type())
+	return fn(e, v)
+}
+
+func (e *Encoder) EncodeNil() error {
+	return e.writeCode(msgpcode.Nil)
+}
+
+func (e *Encoder) EncodeBool(value bool) error {
+	if value {
+		return e.writeCode(msgpcode.True)
+	}
+	return e.writeCode(msgpcode.False)
+}
+
+func (e *Encoder) EncodeDuration(d time.Duration) error {
+	return e.EncodeInt(int64(d))
+}
+
+func (e *Encoder) writeCode(c byte) error {
+	return e.w.WriteByte(c)
+}
+
+func (e *Encoder) write(b []byte) error {
+	_, err := e.w.Write(b)
+	return err
+}
+
+func (e *Encoder) writeString(s string) error {
+	_, err := e.w.Write(stringToBytes(s))
+	return err
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go
new file mode 100644
index 00000000..ba4c61be
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go
@@ -0,0 +1,179 @@
+package msgpack
+
+import (
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+func encodeMapValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	iter := v.MapRange()
+	for iter.Next() {
+		if err := e.EncodeValue(iter.Key()); err != nil {
+			return err
+		}
+		if err := e.EncodeValue(iter.Value()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringStringValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := 
e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.flags&sortMapKeysFlag != 0 { + return e.EncodeMapSorted(m) + } + return e.EncodeMap(m) +} + +func (e *Encoder) EncodeMap(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeMapSorted(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + + keys := make([]string, 0, len(m)) + + for k := range m { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, k := range keys { + if err := e.EncodeString(k); err != nil { + return err + } + if err := e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedMapLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Map16, uint16(l)) + } + return e.write4(msgpcode.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + structFields := structs.Fields(strct.Type(), e.structTag) + if e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(strct, e.flags&omitEmptyFlag != 0) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go new file mode 100644 index 00000000..63c311bf --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go @@ -0,0 +1,252 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. 
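+//
+// Wire-format sketch: EncodeUint8(7) writes the two bytes 0xcc 0x07, whereas
+// the lossy EncodeUint(7) would emit the single positive-fixint byte 0x07.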
+func (e *Encoder) EncodeUint8(n uint8) error {
+	return e.write1(msgpcode.Uint8, n)
+}
+
+func (e *Encoder) encodeUint8Cond(n uint8) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint8(n)
+}
+
+// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number.
+func (e *Encoder) EncodeUint16(n uint16) error {
+	return e.write2(msgpcode.Uint16, n)
+}
+
+func (e *Encoder) encodeUint16Cond(n uint16) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint16(n)
+}
+
+// EncodeUint32 encodes an uint32 in 5 bytes preserving type of the number.
+func (e *Encoder) EncodeUint32(n uint32) error {
+	return e.write4(msgpcode.Uint32, n)
+}
+
+func (e *Encoder) encodeUint32Cond(n uint32) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint32(n)
+}
+
+// EncodeUint64 encodes an uint64 in 9 bytes preserving type of the number.
+func (e *Encoder) EncodeUint64(n uint64) error {
+	return e.write8(msgpcode.Uint64, n)
+}
+
+func (e *Encoder) encodeUint64Cond(n uint64) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeUint(n)
+	}
+	return e.EncodeUint64(n)
+}
+
+// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number.
+func (e *Encoder) EncodeInt8(n int8) error {
+	return e.write1(msgpcode.Int8, uint8(n))
+}
+
+func (e *Encoder) encodeInt8Cond(n int8) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt8(n)
+}
+
+// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number.
+func (e *Encoder) EncodeInt16(n int16) error {
+	return e.write2(msgpcode.Int16, uint16(n))
+}
+
+func (e *Encoder) encodeInt16Cond(n int16) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt16(n)
+}
+
+// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number.
+func (e *Encoder) EncodeInt32(n int32) error {
+	return e.write4(msgpcode.Int32, uint32(n))
+}
+
+func (e *Encoder) encodeInt32Cond(n int32) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt32(n)
+}
+
+// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number.
+func (e *Encoder) EncodeInt64(n int64) error {
+	return e.write8(msgpcode.Int64, uint64(n))
+}
+
+func (e *Encoder) encodeInt64Cond(n int64) error {
+	if e.flags&useCompactIntsFlag != 0 {
+		return e.EncodeInt(n)
+	}
+	return e.EncodeInt64(n)
+}
+
+// EncodeUint encodes an uint64 in 1, 2, 3, 5, or 9 bytes.
+// Type of the number is lost during encoding.
+func (e *Encoder) EncodeUint(n uint64) error {
+	if n <= math.MaxInt8 {
+		return e.w.WriteByte(byte(n))
+	}
+	if n <= math.MaxUint8 {
+		return e.EncodeUint8(uint8(n))
+	}
+	if n <= math.MaxUint16 {
+		return e.EncodeUint16(uint16(n))
+	}
+	if n <= math.MaxUint32 {
+		return e.EncodeUint32(uint32(n))
+	}
+	return e.EncodeUint64(n)
+}
+
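The cascade in `EncodeUint` above (and in `EncodeInt` below) is the width selection that `UseCompactInts` relies on: the value is matched against the fixnum, 8-, 16-, 32- and 64-bit ranges and written with the smallest code that fits. A small illustration of the resulting byte sequences, again assuming the vendored public API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)

	_ = enc.EncodeUint(7)     // positive fixnum: 0x07 (1 byte)
	_ = enc.EncodeUint(200)   // uint8: 0xcc 0xc8 (2 bytes)
	_ = enc.EncodeUint(70000) // uint32: 0xce 0x00 0x01 0x11 0x70 (5 bytes)
	_ = enc.EncodeInt(-3)     // negative fixnum: 0xfd (1 byte)

	fmt.Printf("% x\n", buf.Bytes())
	// Output: 07 cc c8 ce 00 01 11 70 fd
}
```

+// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes.
+// Type of the number is lost during encoding.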
+func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(msgpcode.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(n) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + if e.flags&useCompactFloatsFlag != 0 { + if float32(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write4(msgpcode.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + if e.flags&useCompactFloatsFlag != 0 { + // Both NaN and Inf convert to int64(-0x8000000000000000) + // If n is NaN then it never compares true with any other value + // If n is Inf then it doesn't convert from int64 back to +/-Inf + // In both cases the comparison works. + if float64(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write8(msgpcode.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code byte, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = code + e.buf[1] = n + return e.write(e.buf) +} + +func (e *Encoder) write2(code byte, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = code + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code byte, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = code + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code byte, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = code + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUintValue(e *Encoder, v reflect.Value) error { + return e.EncodeUint(v.Uint()) +} + +func encodeIntValue(e *Encoder, v reflect.Value) error { + return e.EncodeInt(v.Int()) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go new file mode 100644 index 00000000..ca46eada --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go @@ -0,0 +1,139 @@ +package msgpack + +import ( + "math" + "reflect" + + 
"github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var stringSliceType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) + return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(msgpcode.Bin8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Bin16, uint16(l)) + } + return e.write4(msgpcode.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStringLen(l int) error { + if l < 32 { + return e.writeCode(msgpcode.FixedStrLow | byte(l)) + } + if l < 256 { + return e.write1(msgpcode.Str8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Str16, uint16(l)) + } + return e.write4(msgpcode.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if intern := e.flags&useInternedStringsFlag != 0; intern || len(e.dict) > 0 { + return e.encodeInternedString(v, intern) + } + return e.encodeNormalString(v) +} + +func (e *Encoder) encodeNormalString(v string) error { + if err := e.encodeStringLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedArrayLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Array16, uint16(l)) + } + return e.write4(msgpcode.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(stringSliceType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go new file mode 100644 index 00000000..48cf489f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go @@ -0,0 +1,245 @@ +package msgpack + +import ( + "encoding" + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeIntValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + 
reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUintValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := _getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + +func _getEncoder(typ reflect.Type) encoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeEncMap.Load(typ.Elem()); ok { + return ptrEncoderFunc(typ) + } + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + if typ.Implements(binaryMarshalerType) { + return marshalBinaryValue + } + if typ.Implements(textMarshalerType) { + return marshalTextValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + if ptr.Implements(binaryMarshalerType) { + return marshalBinaryValueAddr + } + if ptr.Implements(textMarshalerType) { + return marshalTextValueAddr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := 
v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} + +func nilable(kind reflect.Kind) bool { + switch kind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +//------------------------------------------------------------------------------ + +func marshalBinaryValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalBinaryValue(e, v.Addr()) +} + +func marshalBinaryValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.BinaryMarshaler) + data, err := marshaler.MarshalBinary() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} + +//------------------------------------------------------------------------------ + +func marshalTextValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalTextValue(e, v.Addr()) +} + +func marshalTextValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.TextMarshaler) + data, err := marshaler.MarshalText() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/ext.go b/vendor/github.com/vmihailenco/msgpack/v5/ext.go new file mode 100644 index 00000000..76e11603 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/ext.go @@ -0,0 +1,303 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +type extInfo struct { + Type reflect.Type + Decoder func(d *Decoder, v reflect.Value, extLen int) error +} + +var extTypes = make(map[int8]*extInfo) + +type MarshalerUnmarshaler interface { + Marshaler + Unmarshaler +} + +func RegisterExt(extID int8, value MarshalerUnmarshaler) { + RegisterExtEncoder(extID, value, func(e *Encoder, v reflect.Value) ([]byte, error) { + marshaler := v.Interface().(Marshaler) + return marshaler.MarshalMsgpack() + }) + RegisterExtDecoder(extID, value, func(d *Decoder, v reflect.Value, extLen int) error { + b, err := d.readN(extLen) + if err != nil { + return err + } + return v.Interface().(Unmarshaler).UnmarshalMsgpack(b) + }) +} + +func UnregisterExt(extID int8) { + unregisterExtEncoder(extID) + unregisterExtDecoder(extID) +} + +func RegisterExtEncoder( + extID int8, + value interface{}, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) { + unregisterExtEncoder(extID) + + typ := reflect.TypeOf(value) + extEncoder := makeExtEncoder(extID, typ, encoder) + typeEncMap.Store(extID, typ) + typeEncMap.Store(typ, extEncoder) + if typ.Kind() == reflect.Ptr { + typeEncMap.Store(typ.Elem(), 
makeExtEncoderAddr(extEncoder)) + } +} + +func unregisterExtEncoder(extID int8) { + t, ok := typeEncMap.Load(extID) + if !ok { + return + } + typeEncMap.Delete(extID) + typ := t.(reflect.Type) + typeEncMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeEncMap.Delete(typ.Elem()) + } +} + +func makeExtEncoder( + extID int8, + typ reflect.Type, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) encoderFunc { + nilable := typ.Kind() == reflect.Ptr + + return func(e *Encoder, v reflect.Value) error { + if nilable && v.IsNil() { + return e.EncodeNil() + } + + b, err := encoder(e, v) + if err != nil { + return err + } + + if err := e.EncodeExtHeader(extID, len(b)); err != nil { + return err + } + + return e.write(b) + } +} + +func makeExtEncoderAddr(extEncoder encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return extEncoder(e, v.Addr()) + } +} + +func RegisterExtDecoder( + extID int8, + value interface{}, + decoder func(dec *Decoder, v reflect.Value, extLen int) error, +) { + unregisterExtDecoder(extID) + + typ := reflect.TypeOf(value) + extDecoder := makeExtDecoder(extID, typ, decoder) + extTypes[extID] = &extInfo{ + Type: typ, + Decoder: decoder, + } + + typeDecMap.Store(extID, typ) + typeDecMap.Store(typ, extDecoder) + if typ.Kind() == reflect.Ptr { + typeDecMap.Store(typ.Elem(), makeExtDecoderAddr(extDecoder)) + } +} + +func unregisterExtDecoder(extID int8) { + t, ok := typeDecMap.Load(extID) + if !ok { + return + } + typeDecMap.Delete(extID) + delete(extTypes, extID) + typ := t.(reflect.Type) + typeDecMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeDecMap.Delete(typ.Elem()) + } +} + +func makeExtDecoder( + wantedExtID int8, + typ reflect.Type, + decoder func(d *Decoder, v reflect.Value, extLen int) error, +) decoderFunc { + return nilAwareDecoder(typ, func(d *Decoder, v reflect.Value) error { + extID, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + if extID != wantedExtID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", extID, wantedExtID) + } + return decoder(d, v, extLen) + }) +} + +func makeExtDecoderAddr(extDecoder decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return extDecoder(d, v.Addr()) + } +} + +func (e *Encoder) EncodeExtHeader(extID int8, extLen int) error { + if err := e.encodeExtLen(extLen); err != nil { + return err + } + if err := e.w.WriteByte(byte(extID)); err != nil { + return err + } + return nil +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(msgpcode.FixExt1) + case 2: + return e.writeCode(msgpcode.FixExt2) + case 4: + return e.writeCode(msgpcode.FixExt4) + case 8: + return e.writeCode(msgpcode.FixExt8) + case 16: + return e.writeCode(msgpcode.FixExt16) + } + if l <= math.MaxUint8 { + return e.write1(msgpcode.Ext8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Ext16, uint16(l)) + } + return e.write4(msgpcode.Ext32, uint32(l)) +} + +func (d *Decoder) DecodeExtHeader() (extID int8, extLen int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.extHeader(c) +} + +func (d *Decoder) extHeader(c byte) (int8, int, error) { + extLen, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + extID, err := d.readCode() + if err != nil { + return 0, 0, err 
+ } + + return int8(extID), extLen, nil +} + +func (d *Decoder) parseExtLen(c byte) (int, error) { + switch c { + case msgpcode.FixExt1: + return 1, nil + case msgpcode.FixExt2: + return 2, nil + case msgpcode.FixExt4: + return 4, nil + case msgpcode.FixExt8: + return 8, nil + case msgpcode.FixExt16: + return 16, nil + case msgpcode.Ext8: + n, err := d.uint8() + return int(n), err + case msgpcode.Ext16: + n, err := d.uint16() + return int(n), err + case msgpcode.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext len", c) + } +} + +func (d *Decoder) decodeInterfaceExt(c byte) (interface{}, error) { + extID, extLen, err := d.extHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := reflect.New(info.Type).Elem() + if nilable(v.Kind()) && v.IsNil() { + v.Set(reflect.New(info.Type.Elem())) + } + + if err := info.Decoder(d, v, extLen); err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c byte) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c byte) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c byte) int { + switch c { + case msgpcode.Ext8: + return 1 + case msgpcode.Ext16: + return 2 + case msgpcode.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/intern.go b/vendor/github.com/vmihailenco/msgpack/v5/intern.go new file mode 100644 index 00000000..be0316a8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/intern.go @@ -0,0 +1,238 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + minInternedStringLen = 3 + maxDictLen = math.MaxUint16 +) + +var internedStringExtID = int8(math.MinInt8) + +func init() { + extTypes[internedStringExtID] = &extInfo{ + Type: stringType, + Decoder: decodeInternedStringExt, + } +} + +func decodeInternedStringExt(d *Decoder, v reflect.Value, extLen int) error { + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return err + } + + s, err := d.internedStringAtIndex(idx) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +//------------------------------------------------------------------------------ + +func encodeInternedInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + v = v.Elem() + if v.Kind() == reflect.String { + return e.encodeInternedString(v.String(), true) + } + return e.EncodeValue(v) +} + +func encodeInternedStringValue(e *Encoder, v reflect.Value) error { + return e.encodeInternedString(v.String(), true) +} + +func (e *Encoder) encodeInternedString(s string, intern bool) error { + // Interned string takes at least 3 bytes. Plain string 1 byte + string len. 
+ if len(s) >= minInternedStringLen { + if idx, ok := e.dict[s]; ok { + return e.encodeInternedStringIndex(idx) + } + + if intern && len(e.dict) < maxDictLen { + if e.dict == nil { + e.dict = make(map[string]int) + } + idx := len(e.dict) + e.dict[s] = idx + } + } + + return e.encodeNormalString(s) +} + +func (e *Encoder) encodeInternedStringIndex(idx int) error { + if idx <= math.MaxUint8 { + if err := e.writeCode(msgpcode.FixExt1); err != nil { + return err + } + return e.write1(byte(internedStringExtID), uint8(idx)) + } + + if idx <= math.MaxUint16 { + if err := e.writeCode(msgpcode.FixExt2); err != nil { + return err + } + return e.write2(byte(internedStringExtID), uint16(idx)) + } + + if uint64(idx) <= math.MaxUint32 { + if err := e.writeCode(msgpcode.FixExt4); err != nil { + return err + } + return e.write4(byte(internedStringExtID), uint32(idx)) + } + + return fmt.Errorf("msgpack: interned string index=%d is too large", idx) +} + +//------------------------------------------------------------------------------ + +func decodeInternedInterfaceValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err == nil { + v.Set(reflect.ValueOf(s)) + return nil + } + if err != nil { + if _, ok := err.(unexpectedCodeError); !ok { + return err + } + } + + if err := d.s.UnreadByte(); err != nil { + return err + } + return decodeInterfaceValue(d, v) +} + +func decodeInternedStringValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) decodeInternedString(intern bool) (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + + if msgpcode.IsFixedString(c) { + n := int(c & msgpcode.FixedStrMask) + return d.decodeInternedStringWithLen(n, intern) + } + + switch c { + case msgpcode.Nil: + return "", nil + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4: + typeID, extLen, err := d.extHeader(c) + if err != nil { + return "", err + } + if typeID != internedStringExtID { + err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", + typeID, internedStringExtID) + return "", err + } + + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return "", err + } + + return d.internedStringAtIndex(idx) + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + } + + return "", unexpectedCodeError{ + code: c, + hint: "interned string", + } +} + +func (d *Decoder) decodeInternedStringIndex(extLen int) (int, error) { + switch extLen { + case 1: + n, err := d.uint8() + if err != nil { + return 0, err + } + return int(n), nil + case 2: + n, err := d.uint16() + if err != nil { + return 0, err + } + return int(n), nil + case 4: + n, err := d.uint32() + if err != nil { + return 0, err + } + return int(n), nil + } + + err := fmt.Errorf("msgpack: unsupported ext len=%d decoding interned string", extLen) + return 0, err +} + +func (d *Decoder) internedStringAtIndex(idx int) (string, error) { + if idx >= len(d.dict) { + err := fmt.Errorf("msgpack: interned string at index=%d does not exist", idx) + return "", err + } + return 
d.dict[idx], nil +} + +func (d *Decoder) decodeInternedStringWithLen(n int, intern bool) (string, error) { + if n <= 0 { + return "", nil + } + + s, err := d.stringWithLen(n) + if err != nil { + return "", err + } + + if intern && len(s) >= minInternedStringLen && len(d.dict) < maxDictLen { + d.dict = append(d.dict, s) + } + + return s, nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go new file mode 100644 index 00000000..4db2fa2c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go @@ -0,0 +1,52 @@ +package msgpack + +import "fmt" + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} + +//------------------------------------------------------------------------------ + +type RawMessage []byte + +var ( + _ CustomEncoder = (RawMessage)(nil) + _ CustomDecoder = (*RawMessage)(nil) +) + +func (m RawMessage) EncodeMsgpack(enc *Encoder) error { + return enc.write(m) +} + +func (m *RawMessage) DecodeMsgpack(dec *Decoder) error { + msg, err := dec.DecodeRaw() + if err != nil { + return err + } + *m = msg + return nil +} + +//------------------------------------------------------------------------------ + +type unexpectedCodeError struct { + code byte + hint string +} + +func (err unexpectedCodeError) Error() string { + return fmt.Sprintf("msgpack: unexpected code=%x decoding %s", err.code, err.hint) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go new file mode 100644 index 00000000..e35389cc --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go @@ -0,0 +1,88 @@ +package msgpcode + +var ( + PosFixedNumHigh byte = 0x7f + NegFixedNumLow byte = 0xe0 + + Nil byte = 0xc0 + + False byte = 0xc2 + True byte = 0xc3 + + Float byte = 0xca + Double byte = 0xcb + + Uint8 byte = 0xcc + Uint16 byte = 0xcd + Uint32 byte = 0xce + Uint64 byte = 0xcf + + Int8 byte = 0xd0 + Int16 byte = 0xd1 + Int32 byte = 0xd2 + Int64 byte = 0xd3 + + FixedStrLow byte = 0xa0 + FixedStrHigh byte = 0xbf + FixedStrMask byte = 0x1f + Str8 byte = 0xd9 + Str16 byte = 0xda + Str32 byte = 0xdb + + Bin8 byte = 0xc4 + Bin16 byte = 0xc5 + Bin32 byte = 0xc6 + + FixedArrayLow byte = 0x90 + FixedArrayHigh byte = 0x9f + FixedArrayMask byte = 0xf + Array16 byte = 0xdc + Array32 byte = 0xdd + + FixedMapLow byte = 0x80 + FixedMapHigh byte = 0x8f + FixedMapMask byte = 0xf + Map16 byte = 0xde + Map32 byte = 0xdf + + FixExt1 byte = 0xd4 + FixExt2 byte = 0xd5 + FixExt4 byte = 0xd6 + FixExt8 byte = 0xd7 + FixExt16 byte = 0xd8 + Ext8 byte = 0xc7 + Ext16 byte = 0xc8 + Ext32 byte = 0xc9 +) + +func IsFixedNum(c byte) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c byte) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c byte) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c byte) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c byte) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c byte) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c byte) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c byte) bool { + return 
IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/package.json b/vendor/github.com/vmihailenco/msgpack/v5/package.json new file mode 100644 index 00000000..298910d4 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/package.json @@ -0,0 +1,4 @@ +{ + "name": "msgpack", + "version": "5.3.5" +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/safe.go b/vendor/github.com/vmihailenco/msgpack/v5/safe.go new file mode 100644 index 00000000..8352c9dc --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/safe.go @@ -0,0 +1,13 @@ +// +build appengine + +package msgpack + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return string(b) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/time.go b/vendor/github.com/vmihailenco/msgpack/v5/time.go new file mode 100644 index 00000000..44566ec0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/time.go @@ -0,0 +1,145 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var timeExtID int8 = -1 + +func init() { + RegisterExtEncoder(timeExtID, time.Time{}, timeEncoder) + RegisterExtDecoder(timeExtID, time.Time{}, timeDecoder) +} + +func timeEncoder(e *Encoder, v reflect.Value) ([]byte, error) { + return e.encodeTime(v.Interface().(time.Time)), nil +} + +func timeDecoder(d *Decoder, v reflect.Value, extLen int) error { + tm, err := d.decodeTime(extLen) + if err != nil { + return err + } + + ptr := v.Addr().Interface().(*time.Time) + *ptr = tm + + return nil +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == msgpcode.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if msgpcode.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extID, extLen, err := d.extHeader(c) + if err != nil { + return time.Time{}, err + } + + if extID != timeExtID { + return time.Time{}, fmt.Errorf("msgpack: invalid time ext id=%d", extID) + } + + tm, err := d.decodeTime(extLen) + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Zero time does not have timezone information. 
+		return tm.UTC(), nil
+	}
+	return tm, nil
+}
+
+func (d *Decoder) decodeTime(extLen int) (time.Time, error) {
+	b, err := d.readN(extLen)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	switch len(b) {
+	case 4:
+		sec := binary.BigEndian.Uint32(b)
+		return time.Unix(int64(sec), 0), nil
+	case 8:
+		sec := binary.BigEndian.Uint64(b)
+		nsec := int64(sec >> 34)
+		sec &= 0x00000003ffffffff
+		return time.Unix(int64(sec), nsec), nil
+	case 12:
+		nsec := binary.BigEndian.Uint32(b)
+		sec := binary.BigEndian.Uint64(b[4:])
+		return time.Unix(int64(sec), int64(nsec)), nil
+	default:
+		err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen)
+		return time.Time{}, err
+	}
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/types.go b/vendor/github.com/vmihailenco/msgpack/v5/types.go
new file mode 100644
index 00000000..69aca611
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/types.go
@@ -0,0 +1,407 @@
+package msgpack
+
+import (
+	"encoding"
+	"fmt"
+	"log"
+	"reflect"
+	"sync"
+
+	"github.com/vmihailenco/tagparser/v2"
+)
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+var (
+	customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()
+	customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()
+)
+
+var (
+	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+var (
+	binaryMarshalerType   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+	binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+)
+
+var (
+	textMarshalerType   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+type (
+	encoderFunc func(*Encoder, reflect.Value) error
+	decoderFunc func(*Decoder, reflect.Value) error
+)
+
+var (
+	typeEncMap sync.Map
+	typeDecMap sync.Map
+)
+
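The `typeEncMap`/`typeDecMap` registries above are the same sync.Maps that the extension API in ext.go writes into, so a custom type can participate either by implementing the interfaces from msgpack.go or by registering a msgpack extension. A hedged sketch of the extension route (the `Point` type and ext ID 8 are invented for illustration; `Marshal`/`Unmarshal` are the package's top-level helpers, which this part of the diff does not show):

```go
package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

// Point is a hypothetical type used only for this sketch.
type Point struct{ X, Y int8 }

// MarshalMsgpack encodes the point as two raw payload bytes.
func (p *Point) MarshalMsgpack() ([]byte, error) {
	return []byte{byte(p.X), byte(p.Y)}, nil
}

// UnmarshalMsgpack restores the point from the two bytes written above.
func (p *Point) UnmarshalMsgpack(b []byte) error {
	if len(b) != 2 {
		return fmt.Errorf("invalid point payload: %d bytes", len(b))
	}
	p.X, p.Y = int8(b[0]), int8(b[1])
	return nil
}

func main() {
	// Ext IDs 0..127 are free for applications; negative IDs are
	// reserved (this package uses -1 for time and -128 for interned
	// strings).
	msgpack.RegisterExt(8, (*Point)(nil))

	b, err := msgpack.Marshal(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}

	var p Point
	if err := msgpack.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 1 2
}
```

+// Register registers encoder and decoder functions for a value.
+// This is a low-level API, and in most cases you should prefer implementing
+// CustomEncoder/CustomDecoder or Marshaler/Unmarshaler interfaces.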
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +const defaultStructTag = "msgpack" + +var structs = newStructCache() + +type structCache struct { + m sync.Map +} + +type structCacheKey struct { + tag string + typ reflect.Type +} + +func newStructCache() *structCache { + return new(structCache) +} + +func (m *structCache) Fields(typ reflect.Type, tag string) *fields { + key := structCacheKey{tag: tag, typ: typ} + + if v, ok := m.m.Load(key); ok { + return v.(*fields) + } + + fs := getFields(typ, tag) + m.m.Store(key, fs) + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) Omit(strct reflect.Value, forced bool) bool { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return true + } + return (f.omitEmpty || forced) && isEmptyValue(v) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return e.EncodeNil() + } + return f.encoder(e, v) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + v := fieldByIndexAlloc(strct, f.index) + return f.decoder(d, v) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Type reflect.Type + Map map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(typ reflect.Type) *fields { + return &fields{ + Type: typ, + Map: make(map[string]*field, typ.NumField()), + List: make([]*field, 0, typ.NumField()), + } +} + +func (fs *fields) Add(field *field) { + fs.warnIfFieldExists(field.name) + fs.Map[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) warnIfFieldExists(name string) { + if _, ok := fs.Map[name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, name) + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value, forced bool) []*field { + if !fs.hasOmitEmpty && !forced { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + + for _, f := range fs.List { + if !f.Omit(strct, forced) { + fields = append(fields, f) + } + } + + return fields +} + +func getFields(typ reflect.Type, fallbackTag string) *fields { + fs := newFields(typ) + + var omitEmpty bool + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get(defaultStructTag) + if tagStr == "" && fallbackTag != "" { + tagStr = f.Tag.Get(fallbackTag) + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + fs.AsArray = tag.HasOption("as_array") || tag.HasOption("asArray") + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + } + + if tag.HasOption("intern") { + switch f.Type.Kind() { + case reflect.Interface: + field.encoder = encodeInternedInterfaceValue + field.decoder = decodeInternedInterfaceValue + case reflect.String: + field.encoder = encodeInternedStringValue + field.decoder = decodeInternedStringValue + default: + err := 
fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) + panic(err) + } + } else { + field.encoder = getEncoder(f.Type) + field.decoder = getDecoder(f.Type) + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, fallbackTag) + } else { + inline = shouldInline(fs, f.Type, field, fallbackTag) + } + + if inline { + if _, ok := fs.Map[field.name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) + } + fs.Map[field.name] = field + continue + } + } + + fs.Add(field) + + if alias, ok := tag.Options["alias"]; ok { + fs.warnIfFieldExists(alias) + fs.Map[alias] = field + } + } + return fs +} + +var ( + encodeStructValuePtr uintptr + decodeStructValuePtr uintptr +) + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, tag string) { + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func shouldInline(fs *fields, typ reflect.Type, f *field, tag string) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) 
+ fs.Add(field) + } + return true +} + +type isZeroer interface { + IsZero() bool +} + +func isEmptyValue(v reflect.Value) bool { + kind := v.Kind() + + for kind == reflect.Interface { + if v.IsNil() { + return true + } + v = v.Elem() + kind = v.Kind() + } + + if z, ok := v.Interface().(isZeroer); ok { + return nilable(kind) && v.IsNil() || z.IsZero() + } + + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Ptr: + return v.IsNil() + default: + return false + } +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) { + if len(index) == 1 { + return v.Field(index[0]), true + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, false + } + v = v.Elem() + } + } + v = v.Field(idx) + } + + return v, true +} + +func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + var ok bool + v, ok = indirectNil(v) + if !ok { + return v + } + } + v = v.Field(idx) + } + + return v +} + +func indirectNil(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go new file mode 100644 index 00000000..192ac479 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package msgpack + +import ( + "unsafe" +) + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/version.go b/vendor/github.com/vmihailenco/msgpack/v5/version.go new file mode 100644 index 00000000..1d49337c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/version.go @@ -0,0 +1,6 @@ +package msgpack + +// Version is the current release version. 
+func Version() string { + return "5.3.5" +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml new file mode 100644 index 00000000..7194cd00 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml @@ -0,0 +1,19 @@ +dist: xenial +language: go + +go: + - 1.14.x + - 1.15.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/tagparser + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 diff --git a/vendor/github.com/vmihailenco/tagparser/v2/LICENSE b/vendor/github.com/vmihailenco/tagparser/v2/LICENSE new file mode 100644 index 00000000..3fc93fdf --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/tagparser/v2/Makefile b/vendor/github.com/vmihailenco/tagparser/v2/Makefile new file mode 100644 index 00000000..0b1b5959 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/Makefile @@ -0,0 +1,9 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet ./... + go get github.com/gordonklaus/ineffassign + ineffassign . 
+ golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/v2/README.md b/vendor/github.com/vmihailenco/tagparser/v2/README.md new file mode 100644 index 00000000..c0259de5 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/README.md @@ -0,0 +1,24 @@ +# Opinionated Golang tag parser + +[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) +[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) + +## Installation + +Install: + +```shell +go get github.com/vmihailenco/tagparser/v2 +``` + +## Quickstart + +```go +func ExampleParse() { + tag := tagparser.Parse("some_name,key:value,key2:'complex value'") + fmt.Println(tag.Name) + fmt.Println(tag.Options) + // Output: some_name + // map[key:value key2:'complex value'] +} +``` diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go new file mode 100644 index 00000000..21a9bc7f --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/v2/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p *Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go new file mode 100644 index 00000000..870fe541 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func BytesToString(b []byte) string { + return string(b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go new file mode 100644 index 00000000..f8bc18d9 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package internal + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. 
+func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go new file mode 100644 index 00000000..5002e645 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go @@ -0,0 +1,166 @@ +package tagparser + +import ( + "strings" + + "github.com/vmihailenco/tagparser/v2/internal/parser" +) + +type Tag struct { + Name string + Options map[string]string +} + +func (t *Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func Parse(s string) *Tag { + p := &tagParser{ + Parser: parser.NewString(s), + } + p.parseKey() + return &p.Tag +} + +type tagParser struct { + *parser.Parser + + Tag Tag + hasName bool + key string +} + +func (p *tagParser) setTagOption(key, value string) { + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + + if !p.hasName { + p.hasName = true + if key == "" { + p.Tag.Name = value + return + } + } + if p.Tag.Options == nil { + p.Tag.Options = make(map[string]string) + } + if key == "" { + p.Tag.Options[value] = "" + } else { + p.Tag.Options[key] = value + } +} + +func (p *tagParser) parseKey() { + p.key = "" + + var b []byte + for p.Valid() { + c := p.Read() + switch c { + case ',': + p.Skip(' ') + p.setTagOption("", string(b)) + p.parseKey() + return + case ':': + p.key = string(b) + p.parseValue() + return + case '\'': + p.parseQuotedValue() + return + default: + b = append(b, c) + } + } + + if len(b) > 0 { + p.setTagOption("", string(b)) + } +} + +func (p *tagParser) parseValue() { + const quote = '\'' + c := p.Peek() + if c == quote { + p.Skip(quote) + p.parseQuotedValue() + return + } + + var b []byte + for p.Valid() { + c = p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + b = p.readBrackets(b) + case ',': + p.Skip(' ') + p.setTagOption(p.key, string(b)) + p.parseKey() + return + default: + b = append(b, c) + } + } + p.setTagOption(p.key, string(b)) +} + +func (p *tagParser) readBrackets(b []byte) []byte { + var lvl int +loop: + for p.Valid() { + c := p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + lvl++ + case ')': + b = append(b, c) + lvl-- + if lvl < 0 { + break loop + } + default: + b = append(b, c) + } + } + return b +} + +func (p *tagParser) parseQuotedValue() { + const quote = '\'' + var b []byte + for p.Valid() { + bb, ok := p.ReadSep(quote) + if !ok { + b = append(b, bb...) + break + } + + // keep the escaped single-quote, and continue until we've found the + // one that isn't. + if len(bb) > 0 && bb[len(bb)-1] == '\\' { + b = append(b, bb[:len(bb)-1]...) + b = append(b, quote) + continue + } + + b = append(b, bb...) 
+ break + } + + p.setTagOption(p.key, string(b)) + if p.Skip(',') { + p.Skip(' ') + } + p.parseKey() +} diff --git a/vendor/github.com/wiggin77/cfg/.gitignore b/vendor/github.com/wiggin77/cfg/.gitignore deleted file mode 100644 index f1c181ec..00000000 --- a/vendor/github.com/wiggin77/cfg/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/wiggin77/cfg/.travis.yml b/vendor/github.com/wiggin77/cfg/.travis.yml deleted file mode 100644 index 9899b387..00000000 --- a/vendor/github.com/wiggin77/cfg/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -sudo: false -before_script: - - go vet ./... - \ No newline at end of file diff --git a/vendor/github.com/wiggin77/cfg/README.md b/vendor/github.com/wiggin77/cfg/README.md deleted file mode 100644 index 583a82cb..00000000 --- a/vendor/github.com/wiggin77/cfg/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# cfg - -[![GoDoc](https://godoc.org/github.com/wiggin77/cfg?status.svg)](https://godoc.org/github.com/wiggin77/cfg) -[![Build Status](https://travis-ci.org/wiggin77/cfg.svg?branch=master)](https://travis-ci.org/wiggin77/cfg) - -Go package for app configuration. Supports chained configuration sources for multiple levels of defaults. -Includes APIs for loading Linux style configuration files (name/value pairs) or INI files, map based properties, -or easily create new configuration sources (e.g. load from database). - -Supports monitoring configuration sources for changes, hot loading properties, and notifying listeners of changes. - -## Usage - -```Go -config := &cfg.Config{} -defer config.Shutdown() // stops monitoring - -// load file via filespec string, os.File -src, err := Config.NewSrcFileFromFilespec("./myfile.conf") -if err != nil { - return err -} -// add src to top of chain, meaning first searched -cfg.PrependSource(src) - -// fetch prop 'retries', default to 3 if not found -val := config.Int("retries", 3) -``` - -See [example](./example_test.go) for more complete example, including listening for configuration changes. - -Config API parses the following data types: - -| type | method | example property values | -| ------- | ------ | -------- | -| string | Config.String | test, "" | -| int | Config.Int | -1, 77, 0 | -| int64 | Config.Int64 | -9223372036854775, 372036854775808 | -| float64 | Config.Float64 | -77.3456, 95642331.1 | -| bool | Config.Bool | T,t,true,True,1,0,False,false,f,F | -| time.Duration | Config.Duration | "10ms", "2 hours", "5 min" * | - -\* Units of measure supported: ms, sec, min, hour, day, week, year. diff --git a/vendor/github.com/wiggin77/cfg/config.go b/vendor/github.com/wiggin77/cfg/config.go deleted file mode 100644 index 0e958102..00000000 --- a/vendor/github.com/wiggin77/cfg/config.go +++ /dev/null @@ -1,366 +0,0 @@ -package cfg - -import ( - "errors" - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/wiggin77/cfg/timeconv" -) - -// ErrNotFound returned when an operation is attempted on a -// resource that doesn't exist, such as fetching a non-existing -// property name. -var ErrNotFound = errors.New("not found") - -type sourceEntry struct { - src Source - props map[string]string -} - -// Config provides methods for retrieving property values from one or more -// configuration sources. 
-type Config struct { - mutexSrc sync.RWMutex - mutexListeners sync.RWMutex - srcs []*sourceEntry - chgListeners []ChangedListener - shutdown chan interface{} - wantPanicOnError bool -} - -// PrependSource inserts one or more `Sources` at the beginning of -// the list of sources such that the first source will be the -// source checked first when resolving a property value. -func (config *Config) PrependSource(srcs ...Source) { - arr := config.wrapSources(srcs...) - - config.mutexSrc.Lock() - if config.shutdown == nil { - config.shutdown = make(chan interface{}) - } - config.srcs = append(arr, config.srcs...) - config.mutexSrc.Unlock() - - for _, se := range arr { - if _, ok := se.src.(SourceMonitored); ok { - config.monitor(se) - } - } -} - -// AppendSource appends one or more `Sources` at the end of -// the list of sources such that the last source will be the -// source checked last when resolving a property value. -func (config *Config) AppendSource(srcs ...Source) { - arr := config.wrapSources(srcs...) - - config.mutexSrc.Lock() - if config.shutdown == nil { - config.shutdown = make(chan interface{}) - } - config.srcs = append(config.srcs, arr...) - config.mutexSrc.Unlock() - - for _, se := range arr { - if _, ok := se.src.(SourceMonitored); ok { - config.monitor(se) - } - } -} - -// wrapSources wraps one or more Source's and returns -// them as an array of `sourceEntry`. -func (config *Config) wrapSources(srcs ...Source) []*sourceEntry { - arr := make([]*sourceEntry, 0, len(srcs)) - for _, src := range srcs { - se := &sourceEntry{src: src} - config.reloadProps(se) - arr = append(arr, se) - } - return arr -} - -// SetWantPanicOnError sets the flag determining if Config -// should panic when `GetProps` or `GetLastModified` errors -// for a `Source`. -func (config *Config) SetWantPanicOnError(b bool) { - config.mutexSrc.Lock() - config.wantPanicOnError = b - config.mutexSrc.Unlock() -} - -// ShouldPanicOnError gets the flag determining if Config -// should panic when `GetProps` or `GetLastModified` errors -// for a `Source`. -func (config *Config) ShouldPanicOnError() (b bool) { - config.mutexSrc.RLock() - b = config.wantPanicOnError - config.mutexSrc.RUnlock() - return b -} - -// getProp returns the value of a named property. -// Each `Source` is checked, in the order created by adding via -// `AppendSource` and `PrependSource`, until a value for the -// property is found. -func (config *Config) getProp(name string) (val string, ok bool) { - config.mutexSrc.RLock() - defer config.mutexSrc.RUnlock() - - var s string - for _, se := range config.srcs { - if se.props != nil { - if s, ok = se.props[name]; ok { - val = strings.TrimSpace(s) - return - } - } - } - return -} - -// String returns the value of the named prop as a string. -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. -func (config *Config) String(name string, def string) (val string, err error) { - if v, ok := config.getProp(name); ok { - val = v - err = nil - return - } - - err = ErrNotFound - val = def - return -} - -// Int returns the value of the named prop as an `int`. -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. 
-// -// See config.String -func (config *Config) Int(name string, def int) (val int, err error) { - var s string - if s, err = config.String(name, ""); err == nil { - var i int64 - if i, err = strconv.ParseInt(s, 10, 32); err == nil { - val = int(i) - } - } - if err != nil { - val = def - } - return -} - -// Int64 returns the value of the named prop as an `int64`. -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. -// -// See config.String -func (config *Config) Int64(name string, def int64) (val int64, err error) { - var s string - if s, err = config.String(name, ""); err == nil { - val, err = strconv.ParseInt(s, 10, 64) - } - if err != nil { - val = def - } - return -} - -// Float64 returns the value of the named prop as a `float64`. -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. -// -// See config.String -func (config *Config) Float64(name string, def float64) (val float64, err error) { - var s string - if s, err = config.String(name, ""); err == nil { - val, err = strconv.ParseFloat(s, 64) - } - if err != nil { - val = def - } - return -} - -// Bool returns the value of the named prop as a `bool`. -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. -// -// Supports (t, true, 1, y, yes) for true, and (f, false, 0, n, no) for false, -// all case-insensitive. -// -// See config.String -func (config *Config) Bool(name string, def bool) (val bool, err error) { - var s string - if s, err = config.String(name, ""); err == nil { - switch strings.ToLower(s) { - case "t", "true", "1", "y", "yes": - val = true - case "f", "false", "0", "n", "no": - val = false - default: - err = errors.New("invalid syntax") - } - } - if err != nil { - val = def - } - return -} - -// Duration returns the value of the named prop as a `time.Duration`, representing -// a span of time. -// -// Units of measure are supported: ms, sec, min, hour, day, week, year. -// See config.UnitsToMillis for a complete list of units supported. -// -// If the property is not found then the supplied default `def` -// and `ErrNotFound` are returned. -// -// See config.String -func (config *Config) Duration(name string, def time.Duration) (val time.Duration, err error) { - var s string - if s, err = config.String(name, ""); err == nil { - var ms int64 - ms, err = timeconv.ParseMilliseconds(s) - val = time.Duration(ms) * time.Millisecond - } - if err != nil { - val = def - } - return -} - -// AddChangedListener adds a listener that will receive notifications -// whenever one or more property values change within the config. -func (config *Config) AddChangedListener(l ChangedListener) { - config.mutexListeners.Lock() - defer config.mutexListeners.Unlock() - - config.chgListeners = append(config.chgListeners, l) -} - -// RemoveChangedListener removes all instances of a ChangedListener. -// Returns `ErrNotFound` if the listener was not present. -func (config *Config) RemoveChangedListener(l ChangedListener) error { - config.mutexListeners.Lock() - defer config.mutexListeners.Unlock() - - dest := make([]ChangedListener, 0, len(config.chgListeners)) - err := ErrNotFound - - // Remove all instances of the listener by - // copying list while filtering. - for _, s := range config.chgListeners { - if s != l { - dest = append(dest, s) - } else { - err = nil - } - } - config.chgListeners = dest - return err -} - -// Shutdown can be called to stop monitoring of all config sources. 
-func (config *Config) Shutdown() { - config.mutexSrc.RLock() - defer config.mutexSrc.RUnlock() - if config.shutdown != nil { - close(config.shutdown) - } -} - -// onSourceChanged is called whenever one or more properties of a -// config source has changed. -func (config *Config) onSourceChanged(src SourceMonitored) { - defer func() { - if p := recover(); p != nil { - fmt.Println(p) - } - }() - config.mutexListeners.RLock() - defer config.mutexListeners.RUnlock() - for _, l := range config.chgListeners { - l.ConfigChanged(config, src) - } -} - -// monitor periodically checks a config source for changes. -func (config *Config) monitor(se *sourceEntry) { - go func(se *sourceEntry, shutdown <-chan interface{}) { - var src SourceMonitored - var ok bool - if src, ok = se.src.(SourceMonitored); !ok { - return - } - paused := false - last := time.Time{} - freq := src.GetMonitorFreq() - if freq <= 0 { - paused = true - freq = 10 - last, _ = src.GetLastModified() - } - timer := time.NewTimer(freq) - for { - select { - case <-timer.C: - if !paused { - if latest, err := src.GetLastModified(); err != nil { - if config.ShouldPanicOnError() { - panic(fmt.Sprintf("error <%v> getting last modified for %v", err, src)) - } - } else { - if last.Before(latest) { - last = latest - config.reloadProps(se) - // TODO: calc diff and provide detailed changes - config.onSourceChanged(src) - } - } - } - freq = src.GetMonitorFreq() - if freq <= 0 { - paused = true - freq = 10 - } else { - paused = false - } - timer.Reset(freq) - case <-shutdown: - // stop the timer and exit - if !timer.Stop() { - <-timer.C - } - return - } - } - }(se, config.shutdown) -} - -// reloadProps causes a Source to reload its properties. -func (config *Config) reloadProps(se *sourceEntry) { - config.mutexSrc.Lock() - defer config.mutexSrc.Unlock() - - m, err := se.src.GetProps() - if err != nil { - if config.wantPanicOnError { - panic(fmt.Sprintf("GetProps error for %v", se.src)) - } - return - } - - se.props = make(map[string]string) - for k, v := range m { - se.props[k] = v - } -} diff --git a/vendor/github.com/wiggin77/cfg/ini/ini.go b/vendor/github.com/wiggin77/cfg/ini/ini.go deleted file mode 100644 index d28d7444..00000000 --- a/vendor/github.com/wiggin77/cfg/ini/ini.go +++ /dev/null @@ -1,167 +0,0 @@ -package ini - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "sync" - "time" -) - -// Ini provides parsing and querying of INI format or simple name/value pairs -// such as a simple config file. -// A name/value pair format is just an INI with no sections, and properties can -// be queried using an empty section name. -type Ini struct { - mutex sync.RWMutex - m map[string]*Section - lm time.Time -} - -// LoadFromFilespec loads an INI file from string containing path and filename. -func (ini *Ini) LoadFromFilespec(filespec string) error { - f, err := os.Open(filespec) - if err != nil { - return err - } - return ini.LoadFromFile(f) -} - -// LoadFromFile loads an INI file from `os.File`. -func (ini *Ini) LoadFromFile(file *os.File) error { - - fi, err := file.Stat() - if err != nil { - return err - } - lm := fi.ModTime() - - if err := ini.LoadFromReader(file); err != nil { - return err - } - ini.lm = lm - return nil -} - -// LoadFromReader loads an INI file from an `io.Reader`. -func (ini *Ini) LoadFromReader(reader io.Reader) error { - data, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - return ini.LoadFromString(string(data)) -} - -// LoadFromString parses an INI from a string . 
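The `wiggin77/cfg` package being removed by this diff resolved properties by walking its source chain in order, so a prepended source shadows an appended one. For reference, a minimal sketch of that behaviour using the `SrcMap` source defined later in this file set (property names and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/wiggin77/cfg"
)

func main() {
	config := &cfg.Config{}
	defer config.Shutdown() // stops the monitor goroutines

	defaults := cfg.NewSrcMapFromMap(map[string]string{
		"retries": "3",
		"timeout": "5 sec",
	})
	overrides := cfg.NewSrcMapFromMap(map[string]string{"retries": "10"})

	config.AppendSource(defaults)   // checked last
	config.PrependSource(overrides) // checked first

	retries, _ := config.Int("retries", 1)
	timeout, _ := config.Duration("timeout", 0)
	fmt.Println(retries, timeout) // 10 5s
}
```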
-func (ini *Ini) LoadFromString(s string) error { - m, err := getSections(s) - if err != nil { - return err - } - ini.mutex.Lock() - ini.m = m - ini.lm = time.Now() - ini.mutex.Unlock() - return nil -} - -// GetLastModified returns the last modified timestamp of the -// INI contents. -func (ini *Ini) GetLastModified() time.Time { - return ini.lm -} - -// GetSectionNames returns the names of all sections in this INI. -// Note, the returned section names are a snapshot in time, meaning -// other goroutines may change the contents of this INI as soon as -// the method returns. -func (ini *Ini) GetSectionNames() []string { - ini.mutex.RLock() - defer ini.mutex.RUnlock() - - arr := make([]string, 0, len(ini.m)) - for key := range ini.m { - arr = append(arr, key) - } - return arr -} - -// GetKeys returns the names of all keys in the specified section. -// Note, the returned key names are a snapshot in time, meaning other -// goroutines may change the contents of this INI as soon as the -// method returns. -func (ini *Ini) GetKeys(sectionName string) ([]string, error) { - sec, err := ini.getSection(sectionName) - if err != nil { - return nil, err - } - return sec.getKeys(), nil -} - -// getSection returns the named section. -func (ini *Ini) getSection(sectionName string) (*Section, error) { - ini.mutex.RLock() - defer ini.mutex.RUnlock() - - sec, ok := ini.m[sectionName] - if !ok { - return nil, fmt.Errorf("section '%s' not found", sectionName) - } - return sec, nil -} - -// GetFlattenedKeys returns all section names plus keys as one -// flattened array. -func (ini *Ini) GetFlattenedKeys() []string { - ini.mutex.RLock() - defer ini.mutex.RUnlock() - - arr := make([]string, 0, len(ini.m)*2) - for _, section := range ini.m { - keys := section.getKeys() - for _, key := range keys { - name := section.GetName() - if name != "" { - key = name + "." + key - } - arr = append(arr, key) - } - } - return arr -} - -// GetProp returns the value of the specified key in the named section. -func (ini *Ini) GetProp(section string, key string) (val string, ok bool) { - sec, err := ini.getSection(section) - if err != nil { - return val, false - } - return sec.GetProp(key) -} - -// ToMap returns a flattened map of the section name plus keys mapped -// to values. -func (ini *Ini) ToMap() map[string]string { - m := make(map[string]string) - - ini.mutex.RLock() - defer ini.mutex.RUnlock() - - for _, section := range ini.m { - for _, key := range section.getKeys() { - val, ok := section.GetProp(key) - if ok { - name := section.GetName() - var mapkey string - if name != "" { - mapkey = name + "." + key - } else { - mapkey = key - } - m[mapkey] = val - } - } - } - return m -} diff --git a/vendor/github.com/wiggin77/cfg/ini/parser.go b/vendor/github.com/wiggin77/cfg/ini/parser.go deleted file mode 100644 index 28916409..00000000 --- a/vendor/github.com/wiggin77/cfg/ini/parser.go +++ /dev/null @@ -1,142 +0,0 @@ -package ini - -import ( - "fmt" - "strings" - - "github.com/wiggin77/merror" -) - -// LF is linefeed -const LF byte = 0x0A - -// CR is carriage return -const CR byte = 0x0D - -// getSections parses an INI formatted string, or string containing just name/value pairs, -// returns map of `Section`'s. -// -// Any name/value pairs appearing before a section name are added to the section named -// with an empty string (""). Also true for Linux-style config files where all props -// are outside a named section. -// -// Any errors encountered are aggregated and returned, along with the partially parsed -// sections. 
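The `Ini` type above also accepts plain name/value files: properties that appear before any `[section]` header are filed under the empty section name. A short sketch with illustrative contents:

```go
package main

import (
	"fmt"

	"github.com/wiggin77/cfg/ini"
)

func main() {
	contents := `
top = level
[database]
host = localhost
port = 5432
`
	var conf ini.Ini
	if err := conf.LoadFromString(contents); err != nil {
		panic(err)
	}

	host, _ := conf.GetProp("database", "host") // localhost
	top, _ := conf.GetProp("", "top")           // props before any section
	fmt.Println(host, top)

	// ToMap flattens to "section.key" names; empty-section keys keep their name.
	fmt.Println(conf.ToMap()["database.port"]) // 5432
}
```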
-func getSections(str string) (map[string]*Section, error) { - merr := merror.New() - mapSections := make(map[string]*Section) - lines := buildLineArray(str) - section := newSection("") - - for _, line := range lines { - name, ok := parseSection(line) - if ok { - // A section name encountered. Stop processing the current one. - // Don't add the current section to the map if the section name is blank - // and the prop map is empty. - nameCurr := section.GetName() - if nameCurr != "" || section.hasKeys() { - mapSections[nameCurr] = section - } - // Start processing a new section. - section = newSection(name) - } else { - // Parse the property and add to the current section, or ignore if comment. - if k, v, comment, err := parseProp(line); !comment && err == nil { - section.setProp(k, v) - } else if err != nil { - merr.Append(err) // aggregate errors - } - } - - } - // If the current section is not empty, add it. - if section.hasKeys() { - mapSections[section.GetName()] = section - } - return mapSections, merr.ErrorOrNil() -} - -// buildLineArray parses the given string buffer and creates a list of strings, -// one for each line in the string buffer. -// -// A line is considered to be terminated by any one of a line feed ('\n'), -// a carriage return ('\r'), or a carriage return followed immediately by a -// linefeed. -// -// Lines prefixed with ';' or '#' are considered comments and skipped. -func buildLineArray(str string) []string { - arr := make([]string, 0, 10) - str = str + "\n" - - iLen := len(str) - iPos, iBegin := 0, 0 - var ch byte - - for iPos < iLen { - ch = str[iPos] - if ch == LF || ch == CR { - sub := str[iBegin:iPos] - sub = strings.TrimSpace(sub) - if sub != "" && !strings.HasPrefix(sub, ";") && !strings.HasPrefix(sub, "#") { - arr = append(arr, sub) - } - iPos++ - if ch == CR && iPos < iLen && str[iPos] == LF { - iPos++ - } - iBegin = iPos - } else { - iPos++ - } - } - return arr -} - -// parseSection parses the specified string for a section name enclosed in square brackets. -// Returns the section name found, or `ok=false` if `str` is not a section header. -func parseSection(str string) (name string, ok bool) { - str = strings.TrimSpace(str) - if !strings.HasPrefix(str, "[") { - return "", false - } - iCloser := strings.Index(str, "]") - if iCloser == -1 { - return "", false - } - return strings.TrimSpace(str[1:iCloser]), true -} - -// parseProp parses the specified string and extracts a key/value pair. -// -// If the string is a comment (prefixed with ';' or '#') then `comment=true` -// and key will be empty. -func parseProp(str string) (key string, val string, comment bool, err error) { - iLen := len(str) - iEqPos := strings.Index(str, "=") - if iEqPos == -1 { - return "", "", false, fmt.Errorf("not a key/value pair:'%s'", str) - } - - key = str[0:iEqPos] - key = strings.TrimSpace(key) - if iEqPos+1 < iLen { - val = str[iEqPos+1:] - val = strings.TrimSpace(val) - } - - // Check that the key has at least 1 char. - if key == "" { - return "", "", false, fmt.Errorf("key is empty for '%s'", str) - } - - // Check if this line is a comment that just happens - // to have an equals sign in it. Not an error, but not a - // useable line either. 
- if strings.HasPrefix(key, ";") || strings.HasPrefix(key, "#") { - key = "" - val = "" - comment = true - } - return key, val, comment, err -} diff --git a/vendor/github.com/wiggin77/cfg/ini/section.go b/vendor/github.com/wiggin77/cfg/ini/section.go deleted file mode 100644 index 18c4c254..00000000 --- a/vendor/github.com/wiggin77/cfg/ini/section.go +++ /dev/null @@ -1,109 +0,0 @@ -package ini - -import ( - "fmt" - "strings" - "sync" -) - -// Section represents a section in an INI file. The section has a name, which is -// enclosed in square brackets in the file. The section also has an array of -// key/value pairs. -type Section struct { - name string - props map[string]string - mtx sync.RWMutex -} - -func newSection(name string) *Section { - sec := &Section{} - sec.name = name - sec.props = make(map[string]string) - return sec -} - -// addLines addes an array of strings containing name/value pairs -// of the format `key=value`. -//func addLines(lines []string) { -// TODO -//} - -// GetName returns the name of the section. -func (sec *Section) GetName() (name string) { - sec.mtx.RLock() - name = sec.name - sec.mtx.RUnlock() - return -} - -// GetProp returns the value associated with the given key, or -// `ok=false` if key does not exist. -func (sec *Section) GetProp(key string) (val string, ok bool) { - sec.mtx.RLock() - val, ok = sec.props[key] - sec.mtx.RUnlock() - return -} - -// SetProp sets the value associated with the given key. -func (sec *Section) setProp(key string, val string) { - sec.mtx.Lock() - sec.props[key] = val - sec.mtx.Unlock() -} - -// hasKeys returns true if there are one or more properties in -// this section. -func (sec *Section) hasKeys() (b bool) { - sec.mtx.RLock() - b = len(sec.props) > 0 - sec.mtx.RUnlock() - return -} - -// getKeys returns an array containing all keys in this section. -func (sec *Section) getKeys() []string { - sec.mtx.RLock() - defer sec.mtx.RUnlock() - - arr := make([]string, len(sec.props)) - idx := 0 - for k := range sec.props { - arr[idx] = k - idx++ - } - return arr -} - -// combine the given section with this one. -func (sec *Section) combine(sec2 *Section) { - sec.mtx.Lock() - sec2.mtx.RLock() - defer sec.mtx.Unlock() - defer sec2.mtx.RUnlock() - - for k, v := range sec2.props { - sec.props[k] = v - } -} - -// String returns a string representation of this section. -func (sec *Section) String() string { - return fmt.Sprintf("[%s]\n%s", sec.GetName(), sec.StringPropsOnly()) -} - -// StringPropsOnly returns a string representation of this section -// without the section header. -func (sec *Section) StringPropsOnly() string { - sec.mtx.RLock() - defer sec.mtx.RUnlock() - sb := &strings.Builder{} - - for k, v := range sec.props { - sb.WriteString(k) - sb.WriteString("=") - sb.WriteString(v) - sb.WriteString("\n") - } - return sb.String() -} diff --git a/vendor/github.com/wiggin77/cfg/listener.go b/vendor/github.com/wiggin77/cfg/listener.go deleted file mode 100644 index 12ea4e45..00000000 --- a/vendor/github.com/wiggin77/cfg/listener.go +++ /dev/null @@ -1,11 +0,0 @@ -package cfg - -// ChangedListener interface is for receiving notifications -// when one or more properties within monitored config sources -// (SourceMonitored) have changed values. -type ChangedListener interface { - - // Changed is called when one or more properties in a `SourceMonitored` has a - // changed value. 
- ConfigChanged(cfg *Config, src SourceMonitored) -} diff --git a/vendor/github.com/wiggin77/cfg/nocopy.go b/vendor/github.com/wiggin77/cfg/nocopy.go deleted file mode 100644 index f2450c0b..00000000 --- a/vendor/github.com/wiggin77/cfg/nocopy.go +++ /dev/null @@ -1,11 +0,0 @@ -package cfg - -// noCopy may be embedded into structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. -func (*noCopy) Lock() {} diff --git a/vendor/github.com/wiggin77/cfg/source.go b/vendor/github.com/wiggin77/cfg/source.go deleted file mode 100644 index 09083e97..00000000 --- a/vendor/github.com/wiggin77/cfg/source.go +++ /dev/null @@ -1,58 +0,0 @@ -package cfg - -import ( - "sync" - "time" -) - -// Source is the interface required for any source of name/value pairs. -type Source interface { - - // GetProps fetches all the properties from a source and returns - // them as a map. - GetProps() (map[string]string, error) -} - -// SourceMonitored is the interface required for any config source that is -// monitored for changes. -type SourceMonitored interface { - Source - - // GetLastModified returns the time of the latest modification to any - // property value within the source. If a source does not support - // modifying properties at runtime then the zero value for `Time` - // should be returned to ensure reload events are not generated. - GetLastModified() (time.Time, error) - - // GetMonitorFreq returns the frequency as a `time.Duration` between - // checks for changes to this config source. - // - // Returning zero (or less) will temporarily suspend calls to `GetLastModified` - // and `GetMonitorFreq` will be called every 10 seconds until resumed, after which - // `GetMontitorFreq` will be called at a frequency roughly equal to the `time.Duration` - // returned. - GetMonitorFreq() time.Duration -} - -// AbstractSourceMonitor can be embedded in a custom `Source` to provide the -// basic plumbing for monitor frequency. -type AbstractSourceMonitor struct { - mutex sync.RWMutex - freq time.Duration -} - -// GetMonitorFreq returns the frequency as a `time.Duration` between -// checks for changes to this config source. -func (asm *AbstractSourceMonitor) GetMonitorFreq() (freq time.Duration) { - asm.mutex.RLock() - freq = asm.freq - asm.mutex.RUnlock() - return -} - -// SetMonitorFreq sets the frequency between checks for changes to this config source. -func (asm *AbstractSourceMonitor) SetMonitorFreq(freq time.Duration) { - asm.mutex.Lock() - asm.freq = freq - asm.mutex.Unlock() -} diff --git a/vendor/github.com/wiggin77/cfg/srcfile.go b/vendor/github.com/wiggin77/cfg/srcfile.go deleted file mode 100644 index f42c69fa..00000000 --- a/vendor/github.com/wiggin77/cfg/srcfile.go +++ /dev/null @@ -1,63 +0,0 @@ -package cfg - -import ( - "os" - "time" - - "github.com/wiggin77/cfg/ini" -) - -// SrcFile is a configuration `Source` backed by a file containing -// name/value pairs or INI format. -type SrcFile struct { - AbstractSourceMonitor - ini ini.Ini - file *os.File -} - -// NewSrcFileFromFilespec creates a new SrcFile with the specified filespec. -func NewSrcFileFromFilespec(filespec string) (*SrcFile, error) { - file, err := os.Open(filespec) - if err != nil { - return nil, err - } - return NewSrcFile(file) -} - -// NewSrcFile creates a new SrcFile with the specified os.File. 
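`Source` and `SourceMonitored` above were the package's extension points, with `AbstractSourceMonitor` supplying the monitor-frequency plumbing. A sketch of a custom source built on them; `mapSource` and its contents are hypothetical stand-ins for something like a database-backed source:

```go
package main

import (
	"fmt"
	"time"

	"github.com/wiggin77/cfg"
)

// mapSource is a hypothetical custom source; a real implementation
// might fetch its properties from a database or remote service.
type mapSource struct {
	cfg.AbstractSourceMonitor
	props map[string]string
	lm    time.Time
}

// GetProps satisfies cfg.Source.
func (s *mapSource) GetProps() (map[string]string, error) { return s.props, nil }

// GetLastModified satisfies cfg.SourceMonitored; a constant value
// means no reload events are ever generated.
func (s *mapSource) GetLastModified() (time.Time, error) { return s.lm, nil }

func main() {
	src := &mapSource{
		props: map[string]string{"feature.enabled": "true"},
		lm:    time.Now(),
	}
	src.SetMonitorFreq(30 * time.Second) // how often Config polls for changes

	config := &cfg.Config{}
	defer config.Shutdown()
	config.AppendSource(src)

	enabled, _ := config.Bool("feature.enabled", false)
	fmt.Println(enabled) // true
}
```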
-func NewSrcFile(file *os.File) (*SrcFile, error) { - sf := &SrcFile{} - sf.freq = time.Minute - sf.file = file - if err := sf.ini.LoadFromFile(file); err != nil { - return nil, err - } - return sf, nil -} - -// GetProps fetches all the properties from a source and returns -// them as a map. -func (sf *SrcFile) GetProps() (map[string]string, error) { - lm, err := sf.GetLastModified() - if err != nil { - return nil, err - } - - // Check if we need to reload. - if sf.ini.GetLastModified() != lm { - if err := sf.ini.LoadFromFile(sf.file); err != nil { - return nil, err - } - } - return sf.ini.ToMap(), nil -} - -// GetLastModified returns the time of the latest modification to any -// property value within the source. -func (sf *SrcFile) GetLastModified() (time.Time, error) { - fi, err := sf.file.Stat() - if err != nil { - return time.Now(), err - } - return fi.ModTime(), nil -} diff --git a/vendor/github.com/wiggin77/cfg/srcmap.go b/vendor/github.com/wiggin77/cfg/srcmap.go deleted file mode 100644 index 321db27a..00000000 --- a/vendor/github.com/wiggin77/cfg/srcmap.go +++ /dev/null @@ -1,78 +0,0 @@ -package cfg - -import ( - "time" -) - -// SrcMap is a configuration `Source` backed by a simple map. -type SrcMap struct { - AbstractSourceMonitor - m map[string]string - lm time.Time -} - -// NewSrcMap creates an empty `SrcMap`. -func NewSrcMap() *SrcMap { - sm := &SrcMap{} - sm.m = make(map[string]string) - sm.lm = time.Now() - sm.freq = time.Minute - return sm -} - -// NewSrcMapFromMap creates a `SrcMap` containing a copy of the -// specified map. -func NewSrcMapFromMap(mapIn map[string]string) *SrcMap { - sm := NewSrcMap() - sm.PutAll(mapIn) - return sm -} - -// Put inserts or updates a value in the `SrcMap`. -func (sm *SrcMap) Put(key string, val string) { - sm.mutex.Lock() - sm.m[key] = val - sm.lm = time.Now() - sm.mutex.Unlock() -} - -// PutAll inserts a copy of `mapIn` into the `SrcMap` -func (sm *SrcMap) PutAll(mapIn map[string]string) { - sm.mutex.Lock() - defer sm.mutex.Unlock() - - for k, v := range mapIn { - sm.m[k] = v - } - sm.lm = time.Now() -} - -// GetProps fetches all the properties from a source and returns -// them as a map. -func (sm *SrcMap) GetProps() (m map[string]string, err error) { - sm.mutex.RLock() - m = sm.m - sm.mutex.RUnlock() - return -} - -// GetLastModified returns the time of the latest modification to any -// property value within the source. If a source does not support -// modifying properties at runtime then the zero value for `Time` -// should be returned to ensure reload events are not generated. -func (sm *SrcMap) GetLastModified() (last time.Time, err error) { - sm.mutex.RLock() - last = sm.lm - sm.mutex.RUnlock() - return -} - -// GetMonitorFreq returns the frequency as a `time.Duration` between -// checks for changes to this config source. Defaults to 1 minute -// unless changed with `SetMonitorFreq`. -func (sm *SrcMap) GetMonitorFreq() (freq time.Duration) { - sm.mutex.RLock() - freq = sm.freq - sm.mutex.RUnlock() - return -} diff --git a/vendor/github.com/wiggin77/cfg/timeconv/parse.go b/vendor/github.com/wiggin77/cfg/timeconv/parse.go deleted file mode 100644 index 218ef43a..00000000 --- a/vendor/github.com/wiggin77/cfg/timeconv/parse.go +++ /dev/null @@ -1,108 +0,0 @@ -package timeconv - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" -) - -// MillisPerSecond is the number of millseconds per second. -const MillisPerSecond int64 = 1000 - -// MillisPerMinute is the number of millseconds per minute. 
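`SrcMap.Put` updates the source's last-modified time, which the monitor goroutine started by `Config` polls at `GetMonitorFreq` intervals; registered listeners are then notified and properties are hot-reloaded. A timing-dependent sketch (`printListener` is a hypothetical listener):

```go
package main

import (
	"fmt"
	"time"

	"github.com/wiggin77/cfg"
)

// printListener is a hypothetical listener that just announces reloads.
type printListener struct{}

func (printListener) ConfigChanged(c *cfg.Config, src cfg.SourceMonitored) {
	fmt.Println("config source changed")
}

func main() {
	src := cfg.NewSrcMap()
	src.Put("retries", "3")
	src.SetMonitorFreq(time.Second) // default is one minute

	config := &cfg.Config{}
	defer config.Shutdown()
	config.AppendSource(src)
	config.AddChangedListener(printListener{})

	src.Put("retries", "10")    // bumps the source's last-modified time
	time.Sleep(2 * time.Second) // timing-dependent: let the monitor poll

	val, _ := config.Int("retries", 0)
	fmt.Println(val) // 10 once the monitor has reloaded the source
}
```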
-const MillisPerMinute int64 = MillisPerSecond * 60 - -// MillisPerHour is the number of millseconds per hour. -const MillisPerHour int64 = MillisPerMinute * 60 - -// MillisPerDay is the number of millseconds per day. -const MillisPerDay int64 = MillisPerHour * 24 - -// MillisPerWeek is the number of millseconds per week. -const MillisPerWeek int64 = MillisPerDay * 7 - -// MillisPerYear is the approximate number of millseconds per year. -const MillisPerYear int64 = MillisPerDay*365 + int64((float64(MillisPerDay) * 0.25)) - -// ParseMilliseconds parses a string containing a number plus -// a unit of measure for time and returns the number of milliseconds -// it represents. -// -// Example: -// * "1 second" returns 1000 -// * "1 minute" returns 60000 -// * "1 hour" returns 3600000 -// -// See config.UnitsToMillis for a list of supported units of measure. -func ParseMilliseconds(str string) (int64, error) { - s := strings.TrimSpace(str) - reg := regexp.MustCompile("([0-9\\.\\-+]*)(.*)") - matches := reg.FindStringSubmatch(s) - if matches == nil || len(matches) < 1 || matches[1] == "" { - return 0, fmt.Errorf("invalid syntax - '%s'", s) - } - digits := matches[1] - units := "ms" - if len(matches) > 1 && matches[2] != "" { - units = matches[2] - } - - fDigits, err := strconv.ParseFloat(digits, 64) - if err != nil { - return 0, err - } - - msPerUnit, err := UnitsToMillis(units) - if err != nil { - return 0, err - } - - // Check for overflow. - fms := float64(msPerUnit) * fDigits - if fms > math.MaxInt64 || fms < math.MinInt64 { - return 0, fmt.Errorf("out of range - '%s' overflows", s) - } - ms := int64(fms) - return ms, nil -} - -// UnitsToMillis returns the number of milliseconds represented by the specified unit of measure. -// -// Example: -// * "second" returns 1000
-// * "minute" returns 60000
-// * "hour" returns 3600000
-// -// Supported units of measure: -// * "milliseconds", "millis", "ms", "millisecond" -// * "seconds", "sec", "s", "second" -// * "minutes", "mins", "min", "m", "minute" -// * "hours", "h", "hour" -// * "days", "d", "day" -// * "weeks", "w", "week" -// * "years", "y", "year" -func UnitsToMillis(units string) (ms int64, err error) { - u := strings.TrimSpace(units) - u = strings.ToLower(u) - switch u { - case "milliseconds", "millisecond", "millis", "ms": - ms = 1 - case "seconds", "second", "sec", "s": - ms = MillisPerSecond - case "minutes", "minute", "mins", "min", "m": - ms = MillisPerMinute - case "hours", "hour", "h": - ms = MillisPerHour - case "days", "day", "d": - ms = MillisPerDay - case "weeks", "week", "w": - ms = MillisPerWeek - case "years", "year", "y": - ms = MillisPerYear - default: - err = fmt.Errorf("invalid syntax - '%s' not a supported unit of measure", u) - } - return -} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc..00000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. -ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 2e337a0e..00000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof - -# Output of fossa analyzer -/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 38f564e2..00000000 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,100 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.9.0] - 2021-07-15 -### Added -- Add `Float64.Swap` to match int atomic operations. -- Add `atomic.Time` type for atomic operations on `time.Time` values. - -[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 - -## [1.8.0] - 2021-06-09 -### Added -- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. -- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. - -[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. 
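Stepping back to the `timeconv` package removed above: `ParseMilliseconds` backed `Config.Duration`, bare numbers default to milliseconds, and fractional quantities are accepted. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/wiggin77/cfg/timeconv"
)

func main() {
	for _, s := range []string{"1500", "10ms", "1.5 min", "2 hours"} {
		ms, err := timeconv.ParseMilliseconds(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Printf("%-8s -> %d ms\n", s, ms)
	}
	// Output:
	// 1500     -> 1500 ms
	// 10ms     -> 10 ms
	// 1.5 min  -> 90000 ms
	// 2 hours  -> 7200000 ms
}
```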
- -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. - -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 - -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. - -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 - -## [1.0.0] - 2016-07-18 - -- Initial release. - -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 46c945b3..00000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,79 +0,0 @@ -# Directory to place `go install`ed binaries into. -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... 
- -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... - -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - git status; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 96b47a1f..00000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index 209df7bb..00000000 --- a/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,81 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(val bool) *Bool { - x := &Bool{} - if val != _zeroBool { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(val bool) { - x.v.Store(boolToInt(val)) -} - -// CAS is an atomic compare-and-swap for bool values. -func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.v.CAS(boolToInt(old), boolToInt(new)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(val bool) (old bool) { - return truthy(x.v.Swap(boolToInt(val))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. -func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go deleted file mode 100644 index a2e60e98..00000000 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() (old bool) { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } -} - -// String encodes the wrapped value as a string. -func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) -} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee..00000000 --- a/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 207594f5..00000000 --- a/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,82 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
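With `bool.go` and `bool_ext.go` both shown, the removed `atomic.Bool` wraps a `Uint32` and adds `Toggle` on top of the generated `CAS`/`Swap`. A brief sketch of the API this project is dropping:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	ready := atomic.NewBool(false)

	swapped := ready.CAS(false, true)  // compare-and-swap
	fmt.Println(swapped, ready.Load()) // true true

	old := ready.Toggle()          // atomically negate, returning the previous value
	fmt.Println(old, ready.Load()) // true false
}
```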
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. -func NewDuration(val time.Duration) *Duration { - x := &Duration{} - if val != _zeroDuration { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Duration. -func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(val time.Duration) { - x.v.Store(int64(val)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.v.CAS(int64(old), int64(new)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(val time.Duration) (old time.Duration) { - return time.Duration(x.v.Swap(int64(val))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. -func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 4c18b0a9..00000000 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. 
-func (d *Duration) Add(delta time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(delta))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(delta time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(delta))) -} - -// String encodes the wrapped value as a string. -func (d *Duration) String() string { - return d.Load().String() -} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index 3be19c35..00000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,51 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroError error - -// NewError creates a new Error. -func NewError(val error) *Error { - x := &Error{} - if val != _zeroError { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) -} - -// Store atomically stores the passed error. -func (x *Error) Store(val error) { - x.v.Store(packError(val)) -} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go deleted file mode 100644 index ffe0be21..00000000 --- a/vendor/go.uber.org/atomic/error_ext.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
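`atomic.Duration` wraps an `Int64`, and `atomic.Error` wraps a `Value` (stabilized through the `packedError` struct defined just below). A short usage sketch:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	timeout := atomic.NewDuration(5 * time.Second)
	timeout.Add(500 * time.Millisecond)
	fmt.Println(timeout.Load()) // 5.5s

	lastErr := atomic.NewError(nil)
	lastErr.Store(errors.New("boom"))
	fmt.Println(lastErr.Load()) // boom
}
```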
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. - -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go - -type packedError struct{ Value error } - -func packError(v error) interface{} { - return packedError{v} -} - -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value - } - return nil -} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 8a136718..00000000 --- a/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. -type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(val float64) *Float64 { - x := &Float64{} - if val != _zeroFloat64 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(val float64) { - x.v.Store(math.Float64bits(val)) -} - -// Swap atomically stores the given float64 and returns the old -// value. -func (x *Float64) Swap(val float64) (old float64) { - return math.Float64frombits(x.v.Swap(math.Float64bits(val))) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. 
-func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index df36b010..00000000 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(delta float64) float64 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(delta float64) float64 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float64 values. -// -// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CAS allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CAS loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CAS(old, new) { -// break -// } -// } -// -// If CAS did not match NaN to match, then the above would loop forever. -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 1e9ef4f8..00000000 --- a/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
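`atomic.Float64` stores the value as raw `math.Float64bits` in a `Uint64`, and `Add` is a CAS loop over those bits, so concurrent adders never lose updates (with the NaN caveat documented above). A minimal sketch:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	total := atomic.NewFloat64(0)

	// Safe to call from many goroutines; each Add retries its CAS until it wins.
	total.Add(1.5)
	total.Add(2.25)

	fmt.Println(total.Load())   // 3.75
	fmt.Println(total.String()) // 3.75, formatted like %v
}
```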
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go -//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index 640ea36a..00000000 --- a/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(val int32) *Int32 { - return &Int32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. 
-func (i *Int32) Add(delta int32) int32 { - return atomic.AddInt32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(delta int32) int32 { - return atomic.AddInt32(&i.v, -delta) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) (swapped bool) { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(val int32) { - atomic.StoreInt32(&i.v, val) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(val int32) (old int32) { - return atomic.SwapInt32(&i.v, val) -} - -// MarshalJSON encodes the wrapped int32 into JSON. -func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 9ab66b98..00000000 --- a/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(val int64) *Int64 { - return &Int64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(delta int64) int64 { - return atomic.AddInt64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. 
-func (i *Int64) Sub(delta int64) int64 { - return atomic.AddInt64(&i.v, -delta) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) (swapped bool) { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(val int64) { - atomic.StoreInt64(&i.v, val) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(val int64) (old int64) { - return atomic.SwapInt64(&i.v, val) -} - -// MarshalJSON encodes the wrapped int64 into JSON. -func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index a8201cb4..00000000 --- a/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index 80df93d0..00000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,54 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. -type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(val string) *String { - x := &String{} - if val != _zeroString { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped string. -func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString -} - -// Store atomically stores the passed string. -func (x *String) Store(val string) { - x.v.Store(val) -} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 83d92eda..00000000 --- a/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go -// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which -// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. 
-// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go deleted file mode 100644 index 33460fc3..00000000 --- a/vendor/go.uber.org/atomic/time.go +++ /dev/null @@ -1,55 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "time" -) - -// Time is an atomic type-safe wrapper for time.Time values. -type Time struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroTime time.Time - -// NewTime creates a new Time. -func NewTime(val time.Time) *Time { - x := &Time{} - if val != _zeroTime { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Time. -func (x *Time) Load() time.Time { - return unpackTime(x.v.Load()) -} - -// Store atomically stores the passed time.Time. -func (x *Time) Store(val time.Time) { - x.v.Store(packTime(val)) -} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go deleted file mode 100644 index 1e3dc978..00000000 --- a/vendor/go.uber.org/atomic/time_ext.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go - -func packTime(t time.Time) interface{} { - return t -} - -func unpackTime(v interface{}) time.Time { - if t, ok := v.(time.Time); ok { - return t - } - return time.Time{} -} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index 7859a9cc..00000000 --- a/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(val uint32) *Uint32 { - return &Uint32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(delta uint32) uint32 { - return atomic.AddUint32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(delta uint32) uint32 { - return atomic.AddUint32(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) (swapped bool) { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(val uint32) { - atomic.StoreUint32(&i.v, val) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. 
-func (i *Uint32) Swap(val uint32) (old uint32) { - return atomic.SwapUint32(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. -func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 2f2a7db6..00000000 --- a/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. -type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(val uint64) *Uint64 { - return &Uint64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(delta uint64) uint64 { - return atomic.AddUint64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(delta uint64) uint64 { - return atomic.AddUint64(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) (swapped bool) { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(val uint64) { - atomic.StoreUint64(&i.v, val) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(val uint64) (old uint64) { - return atomic.SwapUint64(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. 
-func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go deleted file mode 100644 index ecf7a772..00000000 --- a/vendor/go.uber.org/atomic/uintptr.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uintptr is an atomic wrapper around uintptr. -type Uintptr struct { - _ nocmp // disallow non-atomic comparison - - v uintptr -} - -// NewUintptr creates a new Uintptr. -func NewUintptr(val uintptr) *Uintptr { - return &Uintptr{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uintptr) Load() uintptr { - return atomic.LoadUintptr(&i.v) -} - -// Add atomically adds to the wrapped uintptr and returns the new value. -func (i *Uintptr) Add(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uintptr and returns the new value. -func (i *Uintptr) Sub(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uintptr and returns the new value. -func (i *Uintptr) Inc() uintptr { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uintptr and returns the new value. -func (i *Uintptr) Dec() uintptr { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { - return atomic.CompareAndSwapUintptr(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uintptr) Store(val uintptr) { - atomic.StoreUintptr(&i.v, val) -} - -// Swap atomically swaps the wrapped uintptr and returns the old value. -func (i *Uintptr) Swap(val uintptr) (old uintptr) { - return atomic.SwapUintptr(&i.v, val) -} - -// MarshalJSON encodes the wrapped uintptr into JSON. 
-func (i *Uintptr) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uintptr. -func (i *Uintptr) UnmarshalJSON(b []byte) error { - var v uintptr - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uintptr) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go deleted file mode 100644 index 169f793d..00000000 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "sync/atomic" - "unsafe" -) - -// UnsafePointer is an atomic wrapper around unsafe.Pointer. -type UnsafePointer struct { - _ nocmp // disallow non-atomic comparison - - v unsafe.Pointer -} - -// NewUnsafePointer creates a new UnsafePointer. -func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { - return &UnsafePointer{v: val} -} - -// Load atomically loads the wrapped value. -func (p *UnsafePointer) Load() unsafe.Pointer { - return atomic.LoadPointer(&p.v) -} - -// Store atomically stores the passed value. -func (p *UnsafePointer) Store(val unsafe.Pointer) { - atomic.StorePointer(&p.v, val) -} - -// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. -func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { - return atomic.SwapPointer(&p.v, val) -} - -// CAS is an atomic compare-and-swap. -func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { - return atomic.CompareAndSwapPointer(&p.v, old, new) -} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 671f3a38..00000000 --- a/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - atomic.Value - - _ nocmp // disallow non-atomic comparison -} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7..00000000 --- a/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index b9a05e3d..00000000 --- a/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -cover.html -cover.out -/bin diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index 3ba05276..00000000 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,72 +0,0 @@ -Releases -======== - -v1.8.0 (2022-02-28) -=================== - -- `Combine`: perform zero allocations when there are no errors. - - -v1.7.0 (2021-05-06) -=================== - -- Add `AppendInvoke` to append into errors from `defer` blocks. - - -v1.6.0 (2020-09-14) -=================== - -- Actually drop library dependency on development-time tooling. - - -v1.5.0 (2020-02-24) -=================== - -- Drop library dependency on development-time tooling. - - -v1.4.0 (2019-11-04) -=================== - -- Add `AppendInto` function to more ergonomically build errors inside a - loop. - - -v1.3.0 (2019-10-29) -=================== - -- Switch to Go modules. - - -v1.2.0 (2019-09-26) -=================== - -- Support extracting and matching against wrapped errors with `errors.As` - and `errors.Is`. 
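The v1.2.0 entry above is the feature that lets the standard library's error inspection helpers see through a combined error. A minimal sketch of what that enables (the probe file name and the `main` wrapper are illustrative assumptions, not part of the deleted sources):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	// os.Stat on a missing path yields a *fs.PathError wrapping fs.ErrNotExist.
	_, statErr := os.Stat("no-such-file")
	err := multierr.Combine(statErr, errors.New("unrelated failure"))

	// Since multierr v1.2.0, errors.Is and errors.As traverse the combined list.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true; pathErr describes the Stat failure
}
```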
- - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. - - -v0.1.0 (2017-03-31) -=================== - -- Initial release diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index dcb6fe72..00000000 --- a/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# Directory to put `go install`ed binaries in. -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: build
build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: golint -golint: - @cd tools && go install golang.org/x/lint/golint - @$(GOBIN)/golint ./... - -.PHONY: staticcheck -staticcheck: - @cd tools && go install honnef.co/go/tools/cmd/staticcheck - @$(GOBIN)/staticcheck ./... - -.PHONY: lint -lint: gofmt golint staticcheck - -.PHONY: cover -cover: - go test -race -coverprofile=cover.out -coverpkg=./... -v ./... - go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 70aacecd..00000000 --- a/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Installation - - go get -u go.uber.org/multierr - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License]. - -[MIT License]: LICENSE.txt -[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr -[doc]: https://pkg.go.dev/go.uber.org/multierr -[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index f45af149..00000000 --- a/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright (c) 2017-2021 Uber Technologies, Inc.
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } -// -// Appending from a loop -// -// You sometimes need to append into an error from a loop. -// -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } -// -// Cases like this may require knowledge of whether an individual instance -// failed. This usually requires introduction of a new variable. -// -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } -// -// multierr includes AppendInto to simplify cases like this. -// -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } -// -// This will append the error into the err variable, and return true if that -// individual error was non-nil. -// -// See AppendInto for more information. -// -// Deferred Functions -// -// Go makes it possible to modify the return value of a function in a defer -// block if the function was using named returns. This makes it possible to -// record resource cleanup failures from deferred blocks. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// multierr provides the Invoker type and AppendInvoke function to make cases -// like the above simpler and obviate the need for a closure. The following is -// roughly equivalent to the example above. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(conn)) -// // ... 
-// } -// -// See AppendInvoke and Invoker for more information. -// -// Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. -// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface. -// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" - "sync" - - "go.uber.org/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, a nil slice is returned. -// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -var _ errorGroup = (*multiError)(nil) - -// Errors returns the list of underlying errors. -// -// This slice MUST NOT be modified. 
-func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. -func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - // Don't pay to inspect small slices. - switch len(errors) { - case 0: - return nil - case 1: - return errors[0] - } - - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // Error list is flat. Make a copy of it - // Otherwise "errors" escapes to the heap - // unconditionally for all other cases. - // This lets us optimize for the "no errors" case. 
- out := make([]error, len(errors)) - copy(out, errors) - return &multiError{errors: out} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) - } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. -// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) -} - -// AppendInto appends an error into the destination of an error pointer and -// returns whether the error being appended was non-nil. -// -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) -// -// The above is equivalent to, -// -// err := multierr.Append(r.Close(), w.Close()) -// -// As AppendInto reports whether the provided error was non-nil, it may be -// used to build a multierr error in a loop more ergonomically. 
For example: -// -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } -// -// Compare this with a version that relies solely on Append: -// -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } -func AppendInto(into *error, err error) (errored bool) { - if into == nil { - // We panic if 'into' is nil. This is not documented above - // because suggesting that the pointer must be non-nil may - // confuse users into thinking that the error that it points - // to must be non-nil. - panic("misuse of multierr.AppendInto: into pointer must not be nil") - } - - if err == nil { - return false - } - *into = Append(*into, err) - return true -} - -// Invoker is an operation that may fail with an error. Use it with -// AppendInvoke to append the result of calling the function into an error. -// This allows you to conveniently defer capture of failing operations. -// -// See also, Close and Invoke. -type Invoker interface { - Invoke() error -} - -// Invoke wraps a function which may fail with an error to match the Invoker -// interface. Use it to supply functions matching this signature to -// AppendInvoke. -// -// For example, -// -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } -// -// In this example, the following line will construct the Invoker right away, -// but defer the invocation of scanner.Err() until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -type Invoke func() error - -// Invoke calls the supplied function and returns its result. -func (i Invoke) Invoke() error { return i() } - -// Close builds an Invoker that closes the provided io.Closer. Use it with -// AppendInvoke to close io.Closers and append their results into an error. -// -// For example, -// -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } -// -// In this example, multierr.Close will construct the Invoker right away, but -// defer the invocation of f.Close until the function returns. -// -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -func Close(closer io.Closer) Invoker { - return Invoke(closer.Close) -} - -// AppendInvoke appends the result of calling the given Invoker into the -// provided error pointer. Use it with named returns to safely defer -// invocation of fallible operations until a function returns, and capture the -// resulting errors. -// -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) -// if err != nil { -// return err -// } -// -// // multierr will call f.Close() when this function returns and -// // if the operation fails, it will append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// -// scanner := bufio.NewScanner(f) -// // Similarly, this schedules scanner.Err to be called and -// // inspected when the function returns, and appends its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// -// // ... -// }
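Fleshed out with a concrete reader, the doSomething pattern in the comment above runs as-is. A minimal, self-contained sketch (the `readLines` name and the input path are assumptions for illustration):

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// readLines mirrors the doc comment above: both Invokers are constructed
// immediately, but f.Close and scanner.Err are only invoked, and their
// errors appended to err, when readLines returns.
func readLines(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))

	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	return nil
}

func main() {
	if err := readLines("example.txt"); err != nil {
		fmt.Println("readLines failed:", err)
	}
}
```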
-// -// Without defer, AppendInvoke behaves exactly like AppendInto. -// -// err := // ... -// multierr.AppendInvoke(&err, multierr.Invoke(foo)) -// -// // ...is roughly equivalent to... -// -// err := // ... -// multierr.AppendInto(&err, foo()) -// -// The advantage of the indirection introduced by Invoker is to make it easy -// to defer the invocation of a function. Without this indirection, the -// invoked function will be evaluated at the time of the defer block rather -// than when the function returns. -// -// // BAD: This is likely not what the caller intended. This will evaluate -// // foo() right away and append its result into the error when the -// // function returns. -// defer multierr.AppendInto(&err, foo()) -// -// // GOOD: This will defer invocation of foo until the function returns. -// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) -// -// multierr provides a few Invoker implementations out of the box for -// convenience. See Invoker for more information. -func AppendInvoke(into *error, invoker Invoker) { - AppendInto(into, invoker.Invoke()) -} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec..00000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml deleted file mode 100644 index 8e5ca7d3..00000000 --- a/vendor/go.uber.org/zap/.codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 95% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure -ignore: - - internal/readme/readme.go - diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl deleted file mode 100644 index 3154a1e6..00000000 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ /dev/null @@ -1,109 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging.
- -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -{{.BenchmarkAddingFields}} - -Log a message with a logger that already has 10 fields of context: - -{{.BenchmarkAccumulatedContext}} - -Log a static string, without any context or `printf`-style templating: - -{{.BenchmarkWithoutFields}} - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.com/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock - diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md deleted file mode 100644 index 794ee303..00000000 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ /dev/null @@ -1,516 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). - -## 1.19.1 (8 Sep 2021) - -### Fixed -* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. -* [#1003][]: JSON: Fix inaccurate precision when encoding float32. - -[#1001]: https://github.com/uber-go/zap/pull/1001 -[#1003]: https://github.com/uber-go/zap/pull/1003 - -## 1.19.0 (9 Aug 2021) - -Enhancements: -* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. -* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields - better. - -[#975]: https://github.com/uber-go/zap/pull/975 -[#984]: https://github.com/uber-go/zap/pull/984 - -Thanks to @lancoLiu and @thockin for their contributions to this release. - -## 1.18.1 (28 Jun 2021) - -Bugfixes: -* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. - -[#974]: https://github.com/uber-go/zap/pull/974 - -## 1.18.0 (28 Jun 2021) - -Enhancements: -* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers - messages in-memory and flushes them periodically. -* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. -* [#897][]: Add `zap.WithClock` option to control the source of time via the - new `zapcore.Clock` interface. -* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` - methods don't match expectations. -* [#943][]: Add support for filtering by level or arbitrary matcher function to - `zaptest/observer`. -* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's - `buffer.Buffer`. - -Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee -for their contributions to this release. - -[#691]: https://github.com/uber-go/zap/pull/691 -[#897]: https://github.com/uber-go/zap/pull/897 -[#943]: https://github.com/uber-go/zap/pull/943 -[#949]: https://github.com/uber-go/zap/pull/949 -[#961]: https://github.com/uber-go/zap/pull/961 -[#971]: https://github.com/uber-go/zap/pull/971 - -## 1.17.0 (25 May 2021) - -Bugfixes: -* [#867][]: Encode `<nil>` for nil `error` instead of a panic. -* [#931][], [#936][]: Update minimum version constraints to address - vulnerabilities in dependencies. - -Enhancements: -* [#865][]: Improve alignment of fields of the Logger struct, reducing its - size from 96 to 80 bytes. -* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. -* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler - with the `application/x-www-form-urlencoded` content type (a usage sketch of this handler follows the 1.11.0 notes below).
-* [#912][]: Support multi-field encoding with `zap.Inline`. -* [#913][]: Speed up SugaredLogger for calls with a single string. -* [#928][]: Add support for filtering by field name to `zaptest/observer`. - -Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. - -## 1.16.0 (1 Sep 2020) - -Bugfixes: -* [#828][]: Fix missing newline in IncreaseLevel error messages. -* [#835][]: Fix panic in JSON encoder when encoding times or durations - without specifying a time or duration encoder. -* [#843][]: Honor CallerSkip when taking stack traces. -* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. -* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log. - -Enhancements: -* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders - for custom layouts. -* [#697][]: Added support for a configurable delimiter in the console encoder. -* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. -* [#844][]: Add ability to include the calling function as part of logs. -* [#843][]: Add `StackSkip` for including truncated stacks as a field. -* [#861][]: Add options to customize Fatal behaviour for better testability. - -Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. - -## 1.15.0 (23 Apr 2020) - -Bugfixes: -* [#804][]: Fix handling of `Time` values out of `UnixNano` range. -* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. - -Enhancements: -* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This - allows disabling annotation of log entries with caller information if - previously enabled with `AddCaller`. -* [#813][]: Deprecate `NewSampler` constructor in favor of - `NewSamplerWithOptions` which supports a `SamplerHook` option. This option - adds support for monitoring sampling decisions through a hook. - -Thanks to @danielbprice for their contributions to this release. - -## 1.14.1 (14 Mar 2020) - -Bugfixes: -* [#791][]: Fix panic on attempting to build a logger with an invalid Config. -* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's - development-time dependencies. -* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to - be generated for arrays of `time.Time` objects when using string-based time - formats. - -Thanks to @YashishDua for their contributions to this release. - -## 1.14.0 (20 Feb 2020) - -Enhancements: -* [#771][]: Optimize calls for disabled log levels. -* [#773][]: Add millisecond duration encoder. -* [#775][]: Add option to increase the level of a logger. -* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. - -Thanks to @caibirdme for their contributions to this release. - -## 1.13.0 (13 Nov 2019) - -Enhancements: -* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors - to log pointers to primitives with support for `nil` values. - -Thanks to @jbizzle for their contributions to this release. - -## 1.12.0 (29 Oct 2019) - -Enhancements: -* [#751][]: Migrate to Go modules. - -## 1.11.0 (21 Oct 2019) - -Enhancements: -* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. -* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. - -Thanks to @juicemia, @uhthomas for their contributions to this release.
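The 1.17.0 entry above mentions the AtomicLevel HTTP handler without showing how it is wired up. As a rough sketch only (the endpoint path and port are invented for illustration; only `zap.NewAtomicLevelAt`, `zap.NewProductionConfig`, and the fact that `AtomicLevel` implements `http.Handler` come from zap itself):

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	// Share one AtomicLevel between the logger config and an HTTP endpoint
	// so the level can be changed while the process is running.
	level := zap.NewAtomicLevelAt(zap.InfoLevel)

	cfg := zap.NewProductionConfig()
	cfg.Level = level
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// AtomicLevel implements http.Handler: GET reports the current level,
	// and PUT (plus URL-encoded POST, per #903) updates it.
	http.Handle("/log/level", level) // path is illustrative
	logger.Info("level endpoint ready")
	_ = http.ListenAndServe(":8080", nil) // port is illustrative
}
```

Because every logger built from this config shares the same `AtomicLevel`, a request such as a PUT with body `{"level":"debug"}` takes effect immediately across all of them.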
- -## 1.10.0 (29 Apr 2019) - -Bugfixes: -* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a - string. -* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. - -Enhancements: -* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test - loggers. -* [#675][]: Don't panic when encoding a String field. -* [#704][]: Disable HTML escaping for JSON objects encoded using the - reflect-based encoder. - -Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions -to this release. - -## v1.9.1 (06 Aug 2018) - -Bugfixes: - -* [#614][]: MapObjectEncoder should not ignore empty slices. - -## v1.9.0 (19 Jul 2018) - -Enhancements: -* [#602][]: Reduce number of allocations when logging with reflection. -* [#572][], [#606][]: Expose a registry for third-party logging sinks. - -Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and -@dimroc for their contributions to this release. - -## v1.8.0 (13 Apr 2018) - -Enhancements: -* [#508][]: Make log level configurable when redirecting the standard - library's logger. -* [#518][]: Add a logger that writes to a `*testing.TB`. -* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. - -Bugfixes: -* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. - -Thanks to @DiSiqueira and @djui for their contributions to this release. - -## v1.7.1 (25 Sep 2017) - -Bugfixes: -* [#504][]: Store strings when using AddByteString with the map encoder. - -## v1.7.0 (21 Sep 2017) - -Enhancements: - -* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user - to specify the level of the logged messages. - -## v1.6.0 (30 Aug 2017) - -Enhancements: - -* [#491][]: Omit zap stack frames from stacktraces. -* [#490][]: Add a `ContextMap` method to observer logs for simpler - field validation in tests. - -## v1.5.0 (22 Jul 2017) - -Enhancements: - -* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. -* [#465][]: Support user-supplied encoders for logger names. - -Bugfixes: - -* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. - -Thanks to @richard-tunein and @pavius for their contributions to this release. - -## v1.4.1 (08 Jun 2017) - -This release fixes two bugs. - -Bugfixes: - -* [#435][]: Support a variety of case conventions when unmarshaling levels. -* [#444][]: Fix a panic in the observer. - -## v1.4.0 (12 May 2017) - -This release adds a few small features and is fully backward-compatible. - -Enhancements: - -* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to - override the Unix-style default. -* [#425][]: Preserve time zones when logging times. -* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a - variety of operations a bit simpler. - -## v1.3.0 (25 Apr 2017) - -This release adds an enhancement to zap's testing helpers as well as the -ability to marshal an AtomicLevel. It is fully backward-compatible. - -Enhancements: - -* [#415][]: Add a substring-filtering helper to zap's observer. This is - particularly useful when testing the `SugaredLogger`. -* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. - -## v1.2.0 (13 Apr 2017) - -This release adds a gRPC compatibility wrapper. It is fully backward-compatible. - -Enhancements: - -* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements - `grpclog.Logger`. - -## v1.1.0 (31 Mar 2017) - -This release fixes two bugs and adds some enhancements to zap's testing helpers. 
-It is fully backward-compatible. - -Bugfixes: - -* [#385][]: Fix caller path trimming on Windows. -* [#396][]: Fix a panic when attempting to use non-existent directories with - zap's configuration struct. - -Enhancements: - -* [#386][]: Add filtering helpers to zaptest's observing logger. - -Thanks to @moitias for contributing to this release. - -## v1.0.0 (14 Mar 2017) - -This is zap's first stable release. All exported APIs are now final, and no -further breaking changes will be made in the 1.x release series. Anyone using a -semver-aware dependency manager should now pin to `^1`. - -Breaking changes: - -* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without - casting from `[]byte` to `string`. -* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, - `zap.Logger`, and `zap.SugaredLogger`. -* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to - clash with other testing helpers. - -Bugfixes: - -* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier - for tab-separated console output. -* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to - work with concurrency-safe `WriteSyncer` implementations. -* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux - systems. -* [#373][]: Report the correct caller from zap's standard library - interoperability wrappers. - -Enhancements: - -* [#348][]: Add a registry allowing third-party encodings to work with zap's - built-in `Config`. -* [#327][]: Make the representation of logger callers configurable (like times, - levels, and durations). -* [#376][]: Allow third-party encoders to use their own buffer pools, which - removes the last performance advantage that zap's encoders have over plugins. -* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple - `WriteSyncer`s and lock the result. -* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in - Go 1.9). -* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it - easier for particularly punctilious users to unit test their application's - logging. - -Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their -contributions to this release. - -## v1.0.0-rc.3 (7 Mar 2017) - -This is the third release candidate for zap's stable release. There are no -breaking changes. - -Bugfixes: - -* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs - rather than `[]uint8`. - -Enhancements: - -* [#307][]: Users can opt into colored output for log levels. -* [#353][]: In addition to hijacking the output of the standard library's - package-global logging functions, users can now construct a zap-backed - `log.Logger` instance. -* [#311][]: Frames from common runtime functions and some of zap's internal - machinery are now omitted from stacktraces. - -Thanks to @ansel1 and @suyash for their contributions to this release. - -## v1.0.0-rc.2 (21 Feb 2017) - -This is the second release candidate for zap's stable release. It includes two -breaking changes. - -Breaking changes: - -* [#316][]: Zap's global loggers are now fully concurrency-safe - (previously, users had to ensure that `ReplaceGlobals` was called before the - loggers were in use). However, they must now be accessed via the `L()` and - `S()` functions. Users can update their projects with - - ``` - gofmt -r "zap.L -> zap.L()" -w . - gofmt -r "zap.S -> zap.S()" -w . 
- ``` -* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid - JSON and YAML struct tags on all config structs. This release fixes the tags - and adds static analysis to prevent similar bugs in the future. - -Bugfixes: - -* [#321][]: Redirecting the standard library's `log` output now - correctly reports the logger's caller. - -Enhancements: - -* [#325][] and [#333][]: Zap now transparently supports non-standard, rich - errors like those produced by `github.com/pkg/errors`. -* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is - now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> - zap.NewNop()' -w .`. -* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a - more informative error. - -Thanks to @skipor and @chapsuk for their contributions to this release. - -## v1.0.0-rc.1 (14 Feb 2017) - -This is the first release candidate for zap's stable release. There are multiple -breaking changes and improvements from the pre-release version. Most notably: - -* **Zap's import path is now "go.uber.org/zap"** — all users will - need to update their code. -* User-facing types and functions remain in the `zap` package. Code relevant - largely to extension authors is now in the `zapcore` package. -* The `zapcore.Core` type makes it easy for third-party packages to use zap's - internals but provide a different user-facing API. -* `Logger` is now a concrete type instead of an interface. -* A less verbose (though slower) logging API is included by default. -* Package-global loggers `L` and `S` are included. -* A human-friendly console encoder is included. -* A declarative config struct allows common logger configurations to be managed - as configuration instead of code. -* Sampling is more accurate, and doesn't depend on the standard library's shared - timer heap. - -## v0.1.0-beta.1 (6 Feb 2017) - -This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and -upgrade at their leisure. Since this is the first tagged release, there are no -backward compatibility concerns and all functionality is new. - -Early zap adopters should pin to the 0.1.x minor version until they're ready to -upgrade to the upcoming stable release. 
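To make the rc.2 migration above concrete, here is a brief sketch (not taken from zap's docs; the development logger and field values are arbitrary) of installing and using the package-global loggers through the `L()` and `S()` accessors:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// ReplaceGlobals swaps in the new globals and returns a function that
	// restores the previous ones; per rc.2, the swap is concurrency-safe.
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	// The globals are now reached through functions, not variables.
	zap.L().Info("typed global", zap.String("accessor", "L"))
	zap.S().Infow("sugared global", "accessor", "S")
}
```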
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 -[#725]: https://github.com/uber-go/zap/pull/725 -[#736]: https://github.com/uber-go/zap/pull/736 -[#751]: https://github.com/uber-go/zap/pull/751 -[#758]: https://github.com/uber-go/zap/pull/758 -[#771]: https://github.com/uber-go/zap/pull/771 -[#773]: https://github.com/uber-go/zap/pull/773 -[#775]: https://github.com/uber-go/zap/pull/775 -[#786]: https://github.com/uber-go/zap/pull/786 -[#791]: https://github.com/uber-go/zap/pull/791 -[#795]: https://github.com/uber-go/zap/pull/795 -[#799]: https://github.com/uber-go/zap/pull/799 -[#804]: https://github.com/uber-go/zap/pull/804 -[#812]: https://github.com/uber-go/zap/pull/812 -[#806]: https://github.com/uber-go/zap/pull/806 -[#813]: https://github.com/uber-go/zap/pull/813 -[#629]: 
https://github.com/uber-go/zap/pull/629 -[#697]: https://github.com/uber-go/zap/pull/697 -[#828]: https://github.com/uber-go/zap/pull/828 -[#835]: https://github.com/uber-go/zap/pull/835 -[#843]: https://github.com/uber-go/zap/pull/843 -[#844]: https://github.com/uber-go/zap/pull/844 -[#852]: https://github.com/uber-go/zap/pull/852 -[#854]: https://github.com/uber-go/zap/pull/854 -[#861]: https://github.com/uber-go/zap/pull/861 -[#862]: https://github.com/uber-go/zap/pull/862 -[#865]: https://github.com/uber-go/zap/pull/865 -[#867]: https://github.com/uber-go/zap/pull/867 -[#881]: https://github.com/uber-go/zap/pull/881 -[#903]: https://github.com/uber-go/zap/pull/903 -[#912]: https://github.com/uber-go/zap/pull/912 -[#913]: https://github.com/uber-go/zap/pull/913 -[#928]: https://github.com/uber-go/zap/pull/928 -[#931]: https://github.com/uber-go/zap/pull/931 -[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md deleted file mode 100644 index 5cd96568..00000000 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributing - -We'd love your help making zap the very best structured logging library in Go! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. - -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/zap.git -cd zap -git remote add upstream https://github.com/uber-go/zap.git -git fetch upstream -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/zap -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. 
- -[fork]: https://github.com/uber-go/zap/fork -[open-issue]: https://github.com/uber-go/zap/issues/new -[cla]: https://cla-assistant.io/uber-go/zap -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md deleted file mode 100644 index b183b20b..00000000 --- a/vendor/go.uber.org/zap/FAQ.md +++ /dev/null @@ -1,164 +0,0 @@ -# Frequently Asked Questions - -## Design - -### Why spend so much effort on logger performance? - -Of course, most applications won't notice the impact of a slow logger: they -already take tens or hundreds of milliseconds for each operation, so an extra -millisecond doesn't matter. - -On the other hand, why *not* make structured logging fast? The `SugaredLogger` -isn't any harder to use than other logging packages, and the `Logger` makes -structured logging possible in performance-sensitive contexts. Across a fleet -of Go microservices, making each application even slightly more efficient adds -up quickly. - -### Why aren't `Logger` and `SugaredLogger` interfaces? - -Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and -`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points -out][go-proverbs], "The bigger the interface, the weaker the abstraction." -Interfaces are also rigid — *any* change requires releasing a new major -version, since it breaks all third-party implementations. - -Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much -abstraction, and it lets us add methods without introducing breaking changes. -Your applications should define and depend upon an interface that includes -just the methods you use. - -### Why are some of my logs missing? - -Logs are dropped intentionally by zap when sampling is enabled. The production -configuration (as returned by `NewProductionConfig()`) enables sampling, which will -cause repeated logs within a second to be sampled. See more details on why sampling -is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). - -### Why sample application logs? - -Applications often experience runs of errors, either because of a bug or -because of a misbehaving user. Logging errors is usually a good idea, but it -can easily make this bad situation worse: not only is your application coping -with a flood of errors, it's also spending extra CPU cycles and I/O logging -those errors. Since writes are typically serialized, logging limits throughput -when you need it most. - -Sampling fixes this problem by dropping repetitive log entries. Under normal -conditions, your application writes out every entry. When similar entries are -logged hundreds or thousands of times each second, though, zap begins dropping -duplicates to preserve throughput. - -### Why do the structured logging APIs take a message in addition to fields? - -Subjectively, we find it helpful to accompany structured context with a brief -description. This isn't critical during development, but it makes debugging -and operating unfamiliar systems much easier. - -More concretely, zap's sampling algorithm uses the message to identify -duplicate entries. In our experience, this is a practical middle ground -between random sampling (which often drops the exact entry that you need while -debugging) and hashing the complete entry (which is prohibitively expensive). - -### Why include package-global loggers?
- -Since so many other logging packages include a global logger, many -applications aren't designed to accept loggers as explicit parameters. -Changing function signatures is often a breaking change, so zap includes -global loggers to simplify migration. - -Avoid them where possible. - -### Why include dedicated Panic and Fatal log levels? - -In general, application code should handle errors gracefully instead of using -`panic` or `os.Exit`. However, every rule has exceptions, and it's common to -crash when an error is truly unrecoverable. To avoid losing any information -— especially the reason for the crash — the logger must flush any -buffered entries before the process exits. - -Zap makes this easy by offering `Panic` and `Fatal` logging methods that -automatically flush before exiting. Of course, this doesn't guarantee that -logs will never be lost, but it eliminates a common error. - -See the discussion in uber-go/zap#207 for more details. - -### What's `DPanic`? - -`DPanic` stands for "panic in development." In development, it logs at -`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to -catch errors that are theoretically possible, but shouldn't actually happen, -*without* crashing in production. - -If you've ever written code like this, you need `DPanic`: - -```go -if err != nil { - panic(fmt.Sprintf("shouldn't ever get here: %v", err)) -} -``` - -## Installation - -### What does the error `expects import "go.uber.org/zap"` mean? - -Either zap was installed incorrectly or you're referencing the wrong package -name in your code. - -Zap's source code happens to be hosted on GitHub, but the [import -path][import-path] is `go.uber.org/zap`. This gives us, the project -maintainers, the freedom to move the source code if necessary. However, it -means that you need to take a little care when installing and using the -package. - -If you follow two simple rules, everything should work: install zap with `go -get -u go.uber.org/zap`, and always import it in your code with `import -"go.uber.org/zap"`. Your code shouldn't contain *any* references to -`github.com/uber-go/zap`. - -## Usage - -### Does zap support log rotation? - -Zap doesn't natively support rotating log files, since we prefer to leave this -to an external program like `logrotate`. - -However, it's easy to integrate a log rotation package like -[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. - -```go -// lumberjack.Logger is already safe for concurrent use, so we don't need to -// lock it. -w := zapcore.AddSync(&lumberjack.Logger{ - Filename: "/var/log/myapp/foo.log", - MaxSize: 500, // megabytes - MaxBackups: 3, - MaxAge: 28, // days -}) -core := zapcore.NewCore( - zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), - w, - zap.InfoLevel, -) -logger := zap.New(core) -``` - -## Extensions - -We'd love to support every logging need within zap itself, but we're only -familiar with a handful of log ingestion systems, flag-parsing packages, and -the like. Rather than merging code that we can't effectively debug and -support, we'd rather grow an ecosystem of zap extensions. 
- -We're aware of the following extensions, but haven't used them ourselves: - -| Package | Integration | -| --- | --- | -| `github.com/tchap/zapext` | Sentry, syslog | -| `github.com/fgrosse/zaptest` | Ginkgo | -| `github.com/blendle/zapdriver` | Stackdriver | -| `github.com/moul/zapgorm` | Gorm | -| `github.com/moul/zapfilter` | Advanced filtering rules | - -[go-proverbs]: https://go-proverbs.github.io/ -[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths -[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile deleted file mode 100644 index 9b1bc3b0..00000000 --- a/vendor/go.uber.org/zap/Makefile +++ /dev/null @@ -1,73 +0,0 @@ -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck -BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem - -# Directories containing independent Go modules. -# -# We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test - -# Many Go tools take file globs or directories as arguments instead of packages. -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: all -all: lint test - -.PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking lint..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking staticcheck..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./checklicense.sh | tee -a lint.log - @[ ! -s lint.log ] - @echo "Checking 'go mod tidy'..." - @make tidy - @if ! git diff --quiet; then \ - echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ - git --no-pager diff; \ - fi - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -.PHONY: test -test: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true - -.PHONY: cover -cover: - go test -race -coverprofile=cover.out -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: bench -BENCH ?= . -bench: - @$(foreach dir,$(MODULE_DIRS), ( \ - cd $(dir) && \ - go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ - ) &&) true - -.PHONY: updatereadme -updatereadme: - rm -f README.md - cat .readme.tmpl | go run internal/readme/readme.go > README.md - -.PHONY: tidy -tidy: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md deleted file mode 100644 index 1e64d6cf..00000000 --- a/vendor/go.uber.org/zap/README.md +++ /dev/null @@ -1,134 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. 
- -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 862 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op -| zerolog | 4021 ns/op | +366% | 76 allocs/op -| go-kit | 4542 ns/op | +427% | 105 allocs/op -| apex/log | 26785 ns/op | +3007% | 115 allocs/op -| logrus | 29501 ns/op | +3322% | 125 allocs/op -| log15 | 29906 ns/op | +3369% | 122 allocs/op - -Log a message with a logger that already has 10 fields of context: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 126 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op -| zerolog | 88 ns/op | -30% | 0 allocs/op -| go-kit | 5087 ns/op | +3937% | 103 allocs/op -| log15 | 18548 ns/op | +14621% | 73 allocs/op -| apex/log | 26012 ns/op | +20544% | 104 allocs/op -| logrus | 27236 ns/op | +21516% | 113 allocs/op - -Log a static string, without any context or `printf`-style templating: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 118 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op -| zerolog | 93 ns/op | -21% | 0 allocs/op -| go-kit | 280 ns/op | +137% | 11 allocs/op -| standard library | 499 ns/op | +323% | 2 allocs/op -| apex/log | 1990 ns/op | +1586% | 10 allocs/op -| logrus | 3129 ns/op | +2552% | 24 allocs/op -| log15 | 3887 ns/op | +3194% | 23 allocs/op - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
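One point the Quick Start and Performance sections above imply but never state: the two APIs convert cheaply in both directions, so picking one is not a whole-program commitment. A minimal sketch (the messages and fields are illustrative):

```go
logger, _ := zap.NewProduction()
defer logger.Sync()

sugar := logger.Sugar() // loosely typed, printf-style API
sugar.Infow("starting up", "pid", 1234)

typed := sugar.Desugar() // back to the allocation-conscious Logger
typed.Info("hot path", zap.Int("items", 42))
```

Since `Sugar` and `Desugar` are inexpensive, mixed codebases can keep the sugared API at the edges and the typed API in hot loops.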
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) - -[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap -[doc]: https://pkg.go.dev/go.uber.org/zap -[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod - diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go deleted file mode 100644 index 5be3704a..00000000 --- a/vendor/go.uber.org/zap/array.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "time" - - "go.uber.org/zap/zapcore" -) - -// Array constructs a field with the given key and ArrayMarshaler. It provides -// a flexible, but still type-safe and efficient, way to add array-like types -// to the logging context. The struct's MarshalLogArray method is called lazily. -func Array(key string, val zapcore.ArrayMarshaler) Field { - return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} -} - -// Bools constructs a field that carries a slice of bools. -func Bools(key string, bs []bool) Field { - return Array(key, bools(bs)) -} - -// ByteStrings constructs a field that carries a slice of []byte, each of which -// must be UTF-8 encoded text. -func ByteStrings(key string, bss [][]byte) Field { - return Array(key, byteStringsArray(bss)) -} - -// Complex128s constructs a field that carries a slice of complex numbers. -func Complex128s(key string, nums []complex128) Field { - return Array(key, complex128s(nums)) -} - -// Complex64s constructs a field that carries a slice of complex numbers. -func Complex64s(key string, nums []complex64) Field { - return Array(key, complex64s(nums)) -} - -// Durations constructs a field that carries a slice of time.Durations. -func Durations(key string, ds []time.Duration) Field { - return Array(key, durations(ds)) -} - -// Float64s constructs a field that carries a slice of floats. 
-func Float64s(key string, nums []float64) Field { - return Array(key, float64s(nums)) -} - -// Float32s constructs a field that carries a slice of floats. -func Float32s(key string, nums []float32) Field { - return Array(key, float32s(nums)) -} - -// Ints constructs a field that carries a slice of integers. -func Ints(key string, nums []int) Field { - return Array(key, ints(nums)) -} - -// Int64s constructs a field that carries a slice of integers. -func Int64s(key string, nums []int64) Field { - return Array(key, int64s(nums)) -} - -// Int32s constructs a field that carries a slice of integers. -func Int32s(key string, nums []int32) Field { - return Array(key, int32s(nums)) -} - -// Int16s constructs a field that carries a slice of integers. -func Int16s(key string, nums []int16) Field { - return Array(key, int16s(nums)) -} - -// Int8s constructs a field that carries a slice of integers. -func Int8s(key string, nums []int8) Field { - return Array(key, int8s(nums)) -} - -// Strings constructs a field that carries a slice of strings. -func Strings(key string, ss []string) Field { - return Array(key, stringArray(ss)) -} - -// Times constructs a field that carries a slice of time.Times. -func Times(key string, ts []time.Time) Field { - return Array(key, times(ts)) -} - -// Uints constructs a field that carries a slice of unsigned integers. -func Uints(key string, nums []uint) Field { - return Array(key, uints(nums)) -} - -// Uint64s constructs a field that carries a slice of unsigned integers. -func Uint64s(key string, nums []uint64) Field { - return Array(key, uint64s(nums)) -} - -// Uint32s constructs a field that carries a slice of unsigned integers. -func Uint32s(key string, nums []uint32) Field { - return Array(key, uint32s(nums)) -} - -// Uint16s constructs a field that carries a slice of unsigned integers. -func Uint16s(key string, nums []uint16) Field { - return Array(key, uint16s(nums)) -} - -// Uint8s constructs a field that carries a slice of unsigned integers. -func Uint8s(key string, nums []uint8) Field { - return Array(key, uint8s(nums)) -} - -// Uintptrs constructs a field that carries a slice of pointer addresses. -func Uintptrs(key string, us []uintptr) Field { - return Array(key, uintptrs(us)) -} - -// Errors constructs a field that carries a slice of errors. 
-func Errors(key string, errs []error) Field { - return Array(key, errArray(errs)) -} - -type bools []bool - -func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bs { - arr.AppendBool(bs[i]) - } - return nil -} - -type byteStringsArray [][]byte - -func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bss { - arr.AppendByteString(bss[i]) - } - return nil -} - -type complex128s []complex128 - -func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex128(nums[i]) - } - return nil -} - -type complex64s []complex64 - -func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex64(nums[i]) - } - return nil -} - -type durations []time.Duration - -func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ds { - arr.AppendDuration(ds[i]) - } - return nil -} - -type float64s []float64 - -func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat64(nums[i]) - } - return nil -} - -type float32s []float32 - -func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat32(nums[i]) - } - return nil -} - -type ints []int - -func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt(nums[i]) - } - return nil -} - -type int64s []int64 - -func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt64(nums[i]) - } - return nil -} - -type int32s []int32 - -func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt32(nums[i]) - } - return nil -} - -type int16s []int16 - -func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt16(nums[i]) - } - return nil -} - -type int8s []int8 - -func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt8(nums[i]) - } - return nil -} - -type stringArray []string - -func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ss { - arr.AppendString(ss[i]) - } - return nil -} - -type times []time.Time - -func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ts { - arr.AppendTime(ts[i]) - } - return nil -} - -type uints []uint - -func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint(nums[i]) - } - return nil -} - -type uint64s []uint64 - -func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint64(nums[i]) - } - return nil -} - -type uint32s []uint32 - -func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint32(nums[i]) - } - return nil -} - -type uint16s []uint16 - -func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint16(nums[i]) - } - return nil -} - -type uint8s []uint8 - -func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint8(nums[i]) - } - return nil -} - -type uintptrs []uintptr - -func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUintptr(nums[i]) - } - return nil -} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go deleted file mode 100644 index 9e929cd9..00000000 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package buffer provides a thin wrapper around a byte slice. Unlike the -// standard library's bytes.Buffer, it supports a portion of the strconv -// package's zero-allocation formatters. -package buffer // import "go.uber.org/zap/buffer" - -import ( - "strconv" - "time" -) - -const _size = 1024 // by default, create 1 KiB buffers - -// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so -// the only way to construct one is via a Pool. -type Buffer struct { - bs []byte - pool Pool -} - -// AppendByte writes a single byte to the Buffer. -func (b *Buffer) AppendByte(v byte) { - b.bs = append(b.bs, v) -} - -// AppendString writes a string to the Buffer. -func (b *Buffer) AppendString(s string) { - b.bs = append(b.bs, s...) -} - -// AppendInt appends an integer to the underlying buffer (assuming base 10). -func (b *Buffer) AppendInt(i int64) { - b.bs = strconv.AppendInt(b.bs, i, 10) -} - -// AppendTime appends the time formatted using the specified layout. -func (b *Buffer) AppendTime(t time.Time, layout string) { - b.bs = t.AppendFormat(b.bs, layout) -} - -// AppendUint appends an unsigned integer to the underlying buffer (assuming -// base 10). -func (b *Buffer) AppendUint(i uint64) { - b.bs = strconv.AppendUint(b.bs, i, 10) -} - -// AppendBool appends a bool to the underlying buffer. -func (b *Buffer) AppendBool(v bool) { - b.bs = strconv.AppendBool(b.bs, v) -} - -// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN -// or +/- Inf. -func (b *Buffer) AppendFloat(f float64, bitSize int) { - b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) -} - -// Len returns the length of the underlying byte slice. -func (b *Buffer) Len() int { - return len(b.bs) -} - -// Cap returns the capacity of the underlying byte slice. -func (b *Buffer) Cap() int { - return cap(b.bs) -} - -// Bytes returns a mutable reference to the underlying byte slice. -func (b *Buffer) Bytes() []byte { - return b.bs -} - -// String returns a string copy of the underlying byte slice. -func (b *Buffer) String() string { - return string(b.bs) -} - -// Reset resets the underlying byte slice. Subsequent writes re-use the slice's -// backing array. 
-func (b *Buffer) Reset() { - b.bs = b.bs[:0] -} - -// Write implements io.Writer. -func (b *Buffer) Write(bs []byte) (int, error) { - b.bs = append(b.bs, bs...) - return len(bs), nil -} - -// WriteByte writes a single byte to the Buffer. -// -// Error returned is always nil, function signature is compatible -// with bytes.Buffer and bufio.Writer -func (b *Buffer) WriteByte(v byte) error { - b.AppendByte(v) - return nil -} - -// WriteString writes a string to the Buffer. -// -// Error returned is always nil, function signature is compatible -// with bytes.Buffer and bufio.Writer -func (b *Buffer) WriteString(s string) (int, error) { - b.AppendString(s) - return len(s), nil -} - -// TrimNewline trims any final "\n" byte from the end of the buffer. -func (b *Buffer) TrimNewline() { - if i := len(b.bs) - 1; i >= 0 { - if b.bs[i] == '\n' { - b.bs = b.bs[:i] - } - } -} - -// Free returns the Buffer to its Pool. -// -// Callers must not retain references to the Buffer after calling Free. -func (b *Buffer) Free() { - b.pool.put(b) -} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go deleted file mode 100644 index 8fb3e202..00000000 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package buffer - -import "sync" - -// A Pool is a type-safe wrapper around a sync.Pool. -type Pool struct { - p *sync.Pool -} - -// NewPool constructs a new Pool. -func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} -} - -// Get retrieves a Buffer from the pool, creating one if necessary. -func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) - buf.Reset() - buf.pool = p - return buf -} - -func (p Pool) put(buf *Buffer) { - p.p.Put(buf) -} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh deleted file mode 100644 index 345ac8b8..00000000 --- a/vendor/go.uber.org/zap/checklicense.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -e - -ERROR_COUNT=0 -while read -r file -do - case "$(head -1 "${file}")" in - *"Copyright (c) "*" Uber Technologies, Inc.") - # everything's cool - ;; - *) - echo "$file is missing license header." 
- (( ERROR_COUNT++ )) - ;; - esac -done < <(git ls-files "*\.go") - -exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go deleted file mode 100644 index 55637fb0..00000000 --- a/vendor/go.uber.org/zap/config.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "sort" - "time" - - "go.uber.org/zap/zapcore" -) - -// SamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -// -// If specified, the Sampler will invoke the Hook after each decision. -// -// Values configured here are per-second. See zapcore.NewSamplerWithOptions for -// details. -type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` - Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` -} - -// Config offers a declarative way to construct a logger. It doesn't do -// anything that can't be done with New, Options, and the various -// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to -// toggle common options. -// -// Note that Config intentionally supports only the most common options. More -// unusual logging setups (logging to network connections or message queues, -// splitting output between multiple files, etc.) are possible, but require -// direct use of the zapcore package. For sample code, see the package-level -// BasicConfiguration and AdvancedConfiguration examples. -// -// For an example showing runtime log level changes, see the documentation for -// AtomicLevel. -type Config struct { - // Level is the minimum enabled logging level. Note that this is a dynamic - // level, so calling Config.Level.SetLevel will atomically change the log - // level of all loggers descended from this config. - Level AtomicLevel `json:"level" yaml:"level"` - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - Development bool `json:"development" yaml:"development"` - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. 
- DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` - // DisableStacktrace completely disables automatic stacktrace capturing. By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` - // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. - Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` - // Encoding sets the logger's encoding. Valid values are "json" and - // "console", as well as any third-party encodings registered via - // RegisterEncoder. - Encoding string `json:"encoding" yaml:"encoding"` - // EncoderConfig sets options for the chosen encoder. See - // zapcore.EncoderConfig for details. - EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of URLs or file paths to write logging output to. - // See Open for details. - OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of URLs to write internal logger errors to. - // The default is standard error. - // - // Note that this setting only affects internal errors; for sample code that - // sends error-level logs to a different location from info- and debug-level - // logs, see the package-level AdvancedConfiguration example. - ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` - // InitialFields is a collection of fields to add to the root logger. - InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` -} - -// NewProductionEncoderConfig returns an opinionated EncoderConfig for -// production environments. -func NewProductionEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - FunctionKey: zapcore.OmitKey, - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.EpochTimeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. -// -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. -func NewProductionConfig() Config { - return Config{ - Level: NewAtomicLevelAt(InfoLevel), - Development: false, - Sampling: &SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: NewProductionEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for -// development environments. -func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - // Keys can be anything except the empty string. - TimeKey: "T", - LevelKey: "L", - NameKey: "N", - CallerKey: "C", - FunctionKey: zapcore.OmitKey, - MessageKey: "M", - StacktraceKey: "S", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewDevelopmentConfig is a reasonable development logging configuration. 
-// Logging is enabled at DebugLevel and above. -// -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. -func NewDevelopmentConfig() Config { - return Config{ - Level: NewAtomicLevelAt(DebugLevel), - Development: true, - Encoding: "console", - EncoderConfig: NewDevelopmentEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// Build constructs a logger from the Config and Options. -func (cfg Config) Build(opts ...Option) (*Logger, error) { - enc, err := cfg.buildEncoder() - if err != nil { - return nil, err - } - - sink, errSink, err := cfg.openSinks() - if err != nil { - return nil, err - } - - if cfg.Level == (AtomicLevel{}) { - return nil, fmt.Errorf("missing Level") - } - - log := New( - zapcore.NewCore(enc, sink, cfg.Level), - cfg.buildOptions(errSink)..., - ) - if len(opts) > 0 { - log = log.WithOptions(opts...) - } - return log, nil -} - -func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { - opts := []Option{ErrorOutput(errSink)} - - if cfg.Development { - opts = append(opts, Development()) - } - - if !cfg.DisableCaller { - opts = append(opts, AddCaller()) - } - - stackLevel := ErrorLevel - if cfg.Development { - stackLevel = WarnLevel - } - if !cfg.DisableStacktrace { - opts = append(opts, AddStacktrace(stackLevel)) - } - - if scfg := cfg.Sampling; scfg != nil { - opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - var samplerOpts []zapcore.SamplerOption - if scfg.Hook != nil { - samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) - } - return zapcore.NewSamplerWithOptions( - core, - time.Second, - cfg.Sampling.Initial, - cfg.Sampling.Thereafter, - samplerOpts..., - ) - })) - } - - if len(cfg.InitialFields) > 0 { - fs := make([]Field, 0, len(cfg.InitialFields)) - keys := make([]string, 0, len(cfg.InitialFields)) - for k := range cfg.InitialFields { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fs = append(fs, Any(k, cfg.InitialFields[k])) - } - opts = append(opts, Fields(fs...)) - } - - return opts -} - -func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { - sink, closeOut, err := Open(cfg.OutputPaths...) - if err != nil { - return nil, nil, err - } - errSink, _, err := Open(cfg.ErrorOutputPaths...) - if err != nil { - closeOut() - return nil, nil, err - } - return sink, errSink, nil -} - -func (cfg Config) buildEncoder() (zapcore.Encoder, error) { - return newEncoder(cfg.Encoding, cfg.EncoderConfig) -} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go deleted file mode 100644 index 8638dd1b..00000000 --- a/vendor/go.uber.org/zap/doc.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zap provides fast, structured, leveled logging. -// -// For applications that log in the hot path, reflection-based serialization -// and string formatting are prohibitively expensive - they're CPU-intensive -// and make many small allocations. Put differently, using json.Marshal and -// fmt.Fprintf to log tons of interface{} makes your application slow. -// -// Zap takes a different approach. It includes a reflection-free, -// zero-allocation JSON encoder, and the base Logger strives to avoid -// serialization overhead and allocations wherever possible. By building the -// high-level SugaredLogger on that foundation, zap lets users choose when -// they need to count every allocation and when they'd prefer a more familiar, -// loosely typed API. -// -// Choosing a Logger -// -// In contexts where performance is nice, but not critical, use the -// SugaredLogger. It's 4-10x faster than other structured logging packages and -// supports both structured and printf-style logging. Like log15 and go-kit, -// the SugaredLogger's structured logging APIs are loosely typed and accept a -// variadic number of key-value pairs. (For more advanced use cases, they also -// accept strongly typed fields - see the SugaredLogger.With documentation for -// details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") -// -// By default, loggers are unbuffered. However, since zap's low-level APIs -// allow buffering, calling Sync before letting your process exit is a good -// habit. -// -// In the rare contexts where every microsecond and every allocation matter, -// use the Logger. It's even faster than the SugaredLogger and allocates far -// less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) -// -// Choosing between the Logger and SugaredLogger doesn't need to be an -// application-wide decision: converting between the two is simple and -// inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() -// -// Configuring Zap -// -// The simplest way to build a Logger is to use zap's opinionated presets: -// NewExample, NewProduction, and NewDevelopment. These presets build a logger -// with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() -// -// Presets are fine for small projects, but larger projects and organizations -// naturally require a bit more customization. For most users, zap's Config -// struct strikes the right balance between flexibility and convenience. 
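As an illustrative sketch of that middle ground (every override below is an arbitrary choice for the example, not a zap default):

    package main

    import "go.uber.org/zap"

    func main() {
        // Start from the production preset and override only what
        // varies between deployments.
        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
        cfg.OutputPaths = []string{"stdout"}
        cfg.InitialFields = map[string]interface{}{"service": "example"}

        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()
        logger.Debug("config-built logger ready")
    }
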
See -// the package-level BasicConfiguration example for sample code. -// -// More unusual configurations (splitting output between files, sending logs -// to a message queue, etc.) are possible, but require direct use of -// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration -// example for sample code. -// -// Extending Zap -// -// The zap package itself is a relatively thin wrapper around the interfaces -// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., -// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an -// exception aggregation service, like Sentry or Rollbar) typically requires -// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core -// interfaces. See the zapcore documentation for details. -// -// Similarly, package authors can use the high-performance Encoder and Core -// implementations in the zapcore package to build their own loggers. -// -// Frequently Asked Questions -// -// An FAQ covering everything from installation errors to design decisions is -// available at https://github.com/uber-go/zap/blob/master/FAQ.md. -package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go deleted file mode 100644 index 08ed8335..00000000 --- a/vendor/go.uber.org/zap/encoder.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "sync" - - "go.uber.org/zap/zapcore" -) - -var ( - errNoEncoderNameSpecified = errors.New("no encoder name specified") - - _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ - "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewConsoleEncoder(encoderConfig), nil - }, - "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewJSONEncoder(encoderConfig), nil - }, - } - _encoderMutex sync.RWMutex -) - -// RegisterEncoder registers an encoder constructor, which the Config struct -// can then reference. By default, the "json" and "console" encoders are -// registered. -// -// Attempting to register an encoder whose name is already taken returns an -// error. 
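An illustrative registration (the encoder name "console-caps" is invented for this sketch; zap itself ships only "json" and "console"):

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        // Reuse the console encoder, but force capitalized colored levels.
        err := zap.RegisterEncoder("console-caps",
            func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
                cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
                return zapcore.NewConsoleEncoder(cfg), nil
            })
        if err != nil {
            panic(err) // the name was already taken
        }

        cfg := zap.NewDevelopmentConfig()
        cfg.Encoding = "console-caps" // resolved through the encoder registry
        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()
        logger.Info("hello from the custom encoder")
    }
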
-func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { - _encoderMutex.Lock() - defer _encoderMutex.Unlock() - if name == "" { - return errNoEncoderNameSpecified - } - if _, ok := _encoderNameToConstructor[name]; ok { - return fmt.Errorf("encoder already registered for name %q", name) - } - _encoderNameToConstructor[name] = constructor - return nil -} - -func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { - return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") - } - - _encoderMutex.RLock() - defer _encoderMutex.RUnlock() - if name == "" { - return nil, errNoEncoderNameSpecified - } - constructor, ok := _encoderNameToConstructor[name] - if !ok { - return nil, fmt.Errorf("no encoder registered for name %q", name) - } - return constructor(encoderConfig) -} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go deleted file mode 100644 index 65982a51..00000000 --- a/vendor/go.uber.org/zap/error.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sync" - - "go.uber.org/zap/zapcore" -) - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Error is shorthand for the common idiom NamedError("error", err). -func Error(err error) Field { - return NamedError("error", err) -} - -// NamedError constructs a field that lazily stores err.Error() under the -// provided key. Errors which also implement fmt.Formatter (like those produced -// by github.com/pkg/errors) will also have their verbose representation stored -// under key+"Verbose". If passed a nil error, the field is a no-op. -// -// For the common case in which the key is simply "error", the Error function -// is shorter and less repetitive. -func NamedError(key string, err error) Field { - if err == nil { - return Skip() - } - return Field{Key: key, Type: zapcore.ErrorType, Interface: err} -} - -type errArray []error - -func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - // To represent each error as an object with an "error" attribute and - // potentially an "errorVerbose" attribute, we need to wrap it in a - // type that implements LogObjectMarshaler. 
To prevent this from - // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) - elem.error = errs[i] - arr.AppendObject(elem) - elem.error = nil - _errArrayElemPool.Put(elem) - } - return nil -} - -type errArrayElem struct { - error -} - -func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { - // Re-use the error field's logic, which supports non-standard error types. - Error(e.error).AddTo(enc) - return nil -} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go deleted file mode 100644 index bbb745db..00000000 --- a/vendor/go.uber.org/zap/field.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "math" - "time" - - "go.uber.org/zap/zapcore" -) - -// Field is an alias for Field. Aliasing this type dramatically -// improves the navigability of this package's API documentation. -type Field = zapcore.Field - -var ( - _minTimeInt64 = time.Unix(0, math.MinInt64) - _maxTimeInt64 = time.Unix(0, math.MaxInt64) -) - -// Skip constructs a no-op field, which is often useful when handling invalid -// inputs in other Field constructors. -func Skip() Field { - return Field{Type: zapcore.SkipType} -} - -// nilField returns a field which will marshal explicitly as nil. See motivation -// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking -// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the -// implementation here should be changed to reflect that. -func nilField(key string) Field { return Reflect(key, nil) } - -// Binary constructs a field that carries an opaque binary blob. -// -// Binary data is serialized in an encoding-appropriate format. For example, -// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, -// use ByteString. -func Binary(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.BinaryType, Interface: val} -} - -// Bool constructs a field that carries a bool. -func Bool(key string, val bool) Field { - var ival int64 - if val { - ival = 1 - } - return Field{Key: key, Type: zapcore.BoolType, Integer: ival} -} - -// Boolp constructs a field that carries a *bool. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
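All of the pointer-flavored constructors that follow (Boolp, Intp, Stringp, Timep, and the rest) share this nil handling: a nil pointer is logged as an explicit null rather than dropped. A minimal sketch, with the output shown approximately:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        var enabled *bool // nil: logged as an explicit null, not omitted
        logger.Info("feature state",
            zap.Bool("known", true),
            zap.Boolp("unknown", enabled),
        )
        // Approximate output:
        // {"level":"info","msg":"feature state","known":true,"unknown":null}
    }
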
-func Boolp(key string, val *bool) Field { - if val == nil { - return nilField(key) - } - return Bool(key, *val) -} - -// ByteString constructs a field that carries UTF-8 encoded text as a []byte. -// To log opaque binary blobs (which aren't necessarily valid UTF-8), use -// Binary. -func ByteString(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} -} - -// Complex128 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex128 to -// interface{}). -func Complex128(key string, val complex128) Field { - return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} -} - -// Complex128p constructs a field that carries a *complex128. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Complex128p(key string, val *complex128) Field { - if val == nil { - return nilField(key) - } - return Complex128(key, *val) -} - -// Complex64 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex64 to -// interface{}). -func Complex64(key string, val complex64) Field { - return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} -} - -// Complex64p constructs a field that carries a *complex64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Complex64p(key string, val *complex64) Field { - if val == nil { - return nilField(key) - } - return Complex64(key, *val) -} - -// Float64 constructs a field that carries a float64. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float64(key string, val float64) Field { - return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} -} - -// Float64p constructs a field that carries a *float64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Float64p(key string, val *float64) Field { - if val == nil { - return nilField(key) - } - return Float64(key, *val) -} - -// Float32 constructs a field that carries a float32. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float32(key string, val float32) Field { - return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} -} - -// Float32p constructs a field that carries a *float32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Float32p(key string, val *float32) Field { - if val == nil { - return nilField(key) - } - return Float32(key, *val) -} - -// Int constructs a field with the given key and value. -func Int(key string, val int) Field { - return Int64(key, int64(val)) -} - -// Intp constructs a field that carries a *int. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Intp(key string, val *int) Field { - if val == nil { - return nilField(key) - } - return Int(key, *val) -} - -// Int64 constructs a field with the given key and value. -func Int64(key string, val int64) Field { - return Field{Key: key, Type: zapcore.Int64Type, Integer: val} -} - -// Int64p constructs a field that carries a *int64. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
-func Int64p(key string, val *int64) Field { - if val == nil { - return nilField(key) - } - return Int64(key, *val) -} - -// Int32 constructs a field with the given key and value. -func Int32(key string, val int32) Field { - return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} -} - -// Int32p constructs a field that carries a *int32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int32p(key string, val *int32) Field { - if val == nil { - return nilField(key) - } - return Int32(key, *val) -} - -// Int16 constructs a field with the given key and value. -func Int16(key string, val int16) Field { - return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} -} - -// Int16p constructs a field that carries a *int16. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int16p(key string, val *int16) Field { - if val == nil { - return nilField(key) - } - return Int16(key, *val) -} - -// Int8 constructs a field with the given key and value. -func Int8(key string, val int8) Field { - return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} -} - -// Int8p constructs a field that carries a *int8. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int8p(key string, val *int8) Field { - if val == nil { - return nilField(key) - } - return Int8(key, *val) -} - -// String constructs a field with the given key and value. -func String(key string, val string) Field { - return Field{Key: key, Type: zapcore.StringType, String: val} -} - -// Stringp constructs a field that carries a *string. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Stringp(key string, val *string) Field { - if val == nil { - return nilField(key) - } - return String(key, *val) -} - -// Uint constructs a field with the given key and value. -func Uint(key string, val uint) Field { - return Uint64(key, uint64(val)) -} - -// Uintp constructs a field that carries a *uint. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uintp(key string, val *uint) Field { - if val == nil { - return nilField(key) - } - return Uint(key, *val) -} - -// Uint64 constructs a field with the given key and value. -func Uint64(key string, val uint64) Field { - return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} -} - -// Uint64p constructs a field that carries a *uint64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint64p(key string, val *uint64) Field { - if val == nil { - return nilField(key) - } - return Uint64(key, *val) -} - -// Uint32 constructs a field with the given key and value. -func Uint32(key string, val uint32) Field { - return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} -} - -// Uint32p constructs a field that carries a *uint32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint32p(key string, val *uint32) Field { - if val == nil { - return nilField(key) - } - return Uint32(key, *val) -} - -// Uint16 constructs a field with the given key and value. -func Uint16(key string, val uint16) Field { - return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} -} - -// Uint16p constructs a field that carries a *uint16. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
-func Uint16p(key string, val *uint16) Field { - if val == nil { - return nilField(key) - } - return Uint16(key, *val) -} - -// Uint8 constructs a field with the given key and value. -func Uint8(key string, val uint8) Field { - return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} -} - -// Uint8p constructs a field that carries a *uint8. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint8p(key string, val *uint8) Field { - if val == nil { - return nilField(key) - } - return Uint8(key, *val) -} - -// Uintptr constructs a field with the given key and value. -func Uintptr(key string, val uintptr) Field { - return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} -} - -// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uintptrp(key string, val *uintptr) Field { - if val == nil { - return nilField(key) - } - return Uintptr(key, *val) -} - -// Reflect constructs a field with the given key and an arbitrary object. It uses -// an encoding-appropriate, reflection-based function to lazily serialize nearly -// any object into the logging context, but it's relatively slow and -// allocation-heavy. Outside tests, Any is always a better choice. -// -// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect -// includes the error message in the final log output. -func Reflect(key string, val interface{}) Field { - return Field{Key: key, Type: zapcore.ReflectType, Interface: val} -} - -// Namespace creates a named, isolated scope within the logger's context. All -// subsequent fields will be added to the new namespace. -// -// This helps prevent key collisions when injecting loggers into sub-components -// or third-party libraries. -func Namespace(key string) Field { - return Field{Key: key, Type: zapcore.NamespaceType} -} - -// Stringer constructs a field with the given key and the output of the value's -// String method. The Stringer's String method is called lazily. -func Stringer(key string, val fmt.Stringer) Field { - return Field{Key: key, Type: zapcore.StringerType, Interface: val} -} - -// Time constructs a Field with the given key and value. The encoder -// controls how the time is serialized. -func Time(key string, val time.Time) Field { - if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { - return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} - } - return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} -} - -// Timep constructs a field that carries a *time.Time. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Timep(key string, val *time.Time) Field { - if val == nil { - return nilField(key) - } - return Time(key, *val) -} - -// Stack constructs a field that stores a stacktrace of the current goroutine -// under provided key. Keep in mind that taking a stacktrace is eager and -// expensive (relatively speaking); this function both makes an allocation and -// takes about two microseconds. -func Stack(key string) Field { - return StackSkip(key, 1) // skip Stack -} - -// StackSkip constructs a field similarly to Stack, but also skips the given -// number of frames from the top of the stacktrace. -func StackSkip(key string, skip int) Field { - // Returning the stacktrace as a string costs an allocation, but saves us - // from expanding the zapcore.Field union struct to include a byte slice. 
Since - // taking a stacktrace is already so expensive (~10us), the extra allocation - // is okay. - return String(key, takeStacktrace(skip+1)) // skip StackSkip -} - -// Duration constructs a field with the given key and value. The encoder -// controls how the duration is serialized. -func Duration(key string, val time.Duration) Field { - return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} -} - -// Durationp constructs a field that carries a *time.Duration. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Durationp(key string, val *time.Duration) Field { - if val == nil { - return nilField(key) - } - return Duration(key, *val) -} - -// Object constructs a field with the given key and ObjectMarshaler. It -// provides a flexible, but still type-safe and efficient, way to add map- or -// struct-like user-defined types to the logging context. The struct's -// MarshalLogObject method is called lazily. -func Object(key string, val zapcore.ObjectMarshaler) Field { - return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} -} - -// Inline constructs a Field that is similar to Object, but it -// will add the elements of the provided ObjectMarshaler to the -// current namespace. -func Inline(val zapcore.ObjectMarshaler) Field { - return zapcore.Field{ - Type: zapcore.InlineMarshalerType, - Interface: val, - } -} - -// Any takes a key and an arbitrary value and chooses the best way to represent -// them as a field, falling back to a reflection-based approach only if -// necessary. -// -// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between -// them. To minimize surprises, []byte values are treated as binary blobs, byte -// values are treated as uint8, and runes are always treated as integers. 
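A short sketch of that dispatch in practice; each value below is routed to the strongly-typed constructor named in the comment, with the map falling back to Reflect:

    package main

    import (
        "time"

        "go.uber.org/zap"
    )

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        logger.Info("dispatch",
            zap.Any("name", "wick"),                       // -> String
            zap.Any("timeout", 5*time.Second),             // -> Duration
            zap.Any("payload", []byte("raw")),             // -> Binary (base64 in JSON)
            zap.Any("fallback", map[int]string{1: "one"}), // -> Reflect
        )
    }
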
-func Any(key string, value interface{}) Field { - switch val := value.(type) { - case zapcore.ObjectMarshaler: - return Object(key, val) - case zapcore.ArrayMarshaler: - return Array(key, val) - case bool: - return Bool(key, val) - case *bool: - return Boolp(key, val) - case []bool: - return Bools(key, val) - case complex128: - return Complex128(key, val) - case *complex128: - return Complex128p(key, val) - case []complex128: - return Complex128s(key, val) - case complex64: - return Complex64(key, val) - case *complex64: - return Complex64p(key, val) - case []complex64: - return Complex64s(key, val) - case float64: - return Float64(key, val) - case *float64: - return Float64p(key, val) - case []float64: - return Float64s(key, val) - case float32: - return Float32(key, val) - case *float32: - return Float32p(key, val) - case []float32: - return Float32s(key, val) - case int: - return Int(key, val) - case *int: - return Intp(key, val) - case []int: - return Ints(key, val) - case int64: - return Int64(key, val) - case *int64: - return Int64p(key, val) - case []int64: - return Int64s(key, val) - case int32: - return Int32(key, val) - case *int32: - return Int32p(key, val) - case []int32: - return Int32s(key, val) - case int16: - return Int16(key, val) - case *int16: - return Int16p(key, val) - case []int16: - return Int16s(key, val) - case int8: - return Int8(key, val) - case *int8: - return Int8p(key, val) - case []int8: - return Int8s(key, val) - case string: - return String(key, val) - case *string: - return Stringp(key, val) - case []string: - return Strings(key, val) - case uint: - return Uint(key, val) - case *uint: - return Uintp(key, val) - case []uint: - return Uints(key, val) - case uint64: - return Uint64(key, val) - case *uint64: - return Uint64p(key, val) - case []uint64: - return Uint64s(key, val) - case uint32: - return Uint32(key, val) - case *uint32: - return Uint32p(key, val) - case []uint32: - return Uint32s(key, val) - case uint16: - return Uint16(key, val) - case *uint16: - return Uint16p(key, val) - case []uint16: - return Uint16s(key, val) - case uint8: - return Uint8(key, val) - case *uint8: - return Uint8p(key, val) - case []byte: - return Binary(key, val) - case uintptr: - return Uintptr(key, val) - case *uintptr: - return Uintptrp(key, val) - case []uintptr: - return Uintptrs(key, val) - case time.Time: - return Time(key, val) - case *time.Time: - return Timep(key, val) - case []time.Time: - return Times(key, val) - case time.Duration: - return Duration(key, val) - case *time.Duration: - return Durationp(key, val) - case []time.Duration: - return Durations(key, val) - case error: - return NamedError(key, val) - case []error: - return Errors(key, val) - case fmt.Stringer: - return Stringer(key, val) - default: - return Reflect(key, val) - } -} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go deleted file mode 100644 index 13128750..00000000 --- a/vendor/go.uber.org/zap/flag.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "flag" - - "go.uber.org/zap/zapcore" -) - -// LevelFlag uses the standard library's flag.Var to declare a global flag -// with the specified name, default, and usage guidance. The returned value is -// a pointer to the value of the flag. -// -// If you don't want to use the flag package's global state, you can use any -// non-nil *Level as a flag.Value with your own *flag.FlagSet. -func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { - lvl := defaultLevel - flag.Var(&lvl, name, usage) - return &lvl -} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml deleted file mode 100644 index 8e1d05e9..00000000 --- a/vendor/go.uber.org/zap/glide.yaml +++ /dev/null @@ -1,34 +0,0 @@ -package: go.uber.org/zap -license: MIT -import: -- package: go.uber.org/atomic - version: ^1 -- package: go.uber.org/multierr - version: ^1 -testImport: -- package: github.com/satori/go.uuid -- package: github.com/sirupsen/logrus -- package: github.com/apex/log - subpackages: - - handlers/json -- package: github.com/go-kit/kit - subpackages: - - log -- package: github.com/stretchr/testify - subpackages: - - assert - - require -- package: gopkg.in/inconshreveable/log15.v2 -- package: github.com/mattn/goveralls -- package: github.com/pborman/uuid -- package: github.com/pkg/errors -- package: github.com/rs/zerolog -- package: golang.org/x/tools - subpackages: - - cover -- package: golang.org/x/lint - subpackages: - - golint -- package: github.com/axw/gocov - subpackages: - - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go deleted file mode 100644 index c1ac0507..00000000 --- a/vendor/go.uber.org/zap/global.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "bytes" - "fmt" - "log" - "os" - "sync" - - "go.uber.org/zap/zapcore" -) - -const ( - _loggerWriterDepth = 2 - _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + - "https://github.com/uber-go/zap/issues/new and reference this error: %v" -) - -var ( - _globalMu sync.RWMutex - _globalL = NewNop() - _globalS = _globalL.Sugar() -) - -// L returns the global Logger, which can be reconfigured with ReplaceGlobals. -// It's safe for concurrent use. -func L() *Logger { - _globalMu.RLock() - l := _globalL - _globalMu.RUnlock() - return l -} - -// S returns the global SugaredLogger, which can be reconfigured with -// ReplaceGlobals. It's safe for concurrent use. -func S() *SugaredLogger { - _globalMu.RLock() - s := _globalS - _globalMu.RUnlock() - return s -} - -// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a -// function to restore the original values. It's safe for concurrent use. -func ReplaceGlobals(logger *Logger) func() { - _globalMu.Lock() - prev := _globalL - _globalL = logger - _globalS = logger.Sugar() - _globalMu.Unlock() - return func() { ReplaceGlobals(prev) } -} - -// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at -// InfoLevel. To redirect the standard library's package-global logging -// functions, use RedirectStdLog instead. -func NewStdLog(l *Logger) *log.Logger { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - f := logger.Info - return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) -} - -// NewStdLogAt returns *log.Logger which writes to supplied zap logger at -// required level. -func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil -} - -// RedirectStdLog redirects output from the standard library's package-global -// logger to the supplied logger at InfoLevel. Since zap already handles caller -// annotations, timestamps, etc., it automatically disables the standard -// library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLog(l *Logger) func() { - f, err := redirectStdLogAt(l, InfoLevel) - if err != nil { - // Can't get here, since passing InfoLevel to redirectStdLogAt always - // works. - panic(fmt.Sprintf(_programmerErrorTemplate, err)) - } - return f -} - -// RedirectStdLogAt redirects output from the standard library's package-global -// logger to the supplied logger at the specified level. Since zap already -// handles caller annotations, timestamps, etc., it automatically disables the -// standard library's annotations and prefixing. 
-// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - return redirectStdLogAt(l, level) -} - -func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - flags := log.Flags() - prefix := log.Prefix() - log.SetFlags(0) - log.SetPrefix("") - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - log.SetOutput(&loggerWriter{logFunc}) - return func() { - log.SetFlags(flags) - log.SetPrefix(prefix) - log.SetOutput(os.Stderr) - }, nil -} - -func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { - switch lvl { - case DebugLevel: - return logger.Debug, nil - case InfoLevel: - return logger.Info, nil - case WarnLevel: - return logger.Warn, nil - case ErrorLevel: - return logger.Error, nil - case DPanicLevel: - return logger.DPanic, nil - case PanicLevel: - return logger.Panic, nil - case FatalLevel: - return logger.Fatal, nil - } - return nil, fmt.Errorf("unrecognized level: %q", lvl) -} - -type loggerWriter struct { - logFunc func(msg string, fields ...Field) -} - -func (l *loggerWriter) Write(p []byte) (int, error) { - p = bytes.TrimSpace(p) - l.logFunc(string(p)) - return len(p), nil -} diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go deleted file mode 100644 index 6b5dbda8..00000000 --- a/vendor/go.uber.org/zap/global_go112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build go1.12 - -package zap - -const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go deleted file mode 100644 index d3ab9af9..00000000 --- a/vendor/go.uber.org/zap/global_prego112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build !go1.12 - -package zap - -const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go deleted file mode 100644 index 1297c33b..00000000 --- a/vendor/go.uber.org/zap/http_handler.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - - "go.uber.org/zap/zapcore" -) - -// ServeHTTP is a simple JSON endpoint that can report on or change the current -// logging level. -// -// GET -// -// The GET request returns a JSON description of the current logging level like: -// {"level":"info"} -// -// PUT -// -// The PUT request changes the logging level. It is perfectly safe to change the -// logging level while a program is running. Two content types are supported: -// -// Content-Type: application/x-www-form-urlencoded -// -// With this content type, the level can be provided through the request body or -// a query parameter. The log level is URL encoded like: -// -// level=debug -// -// The request body takes precedence over the query parameter, if both are -// specified. -// -// This content type is the default for a curl PUT request. Following are two -// example curl requests that both set the logging level to debug. 
-// -// curl -X PUT localhost:8080/log/level?level=debug -// curl -X PUT localhost:8080/log/level -d level=debug -// -// For any other content type, the payload is expected to be JSON encoded and -// look like: -// -// {"level":"info"} -// -// An example curl request could look like this: -// -// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' -// -func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { - type errorResponse struct { - Error string `json:"error"` - } - type payload struct { - Level zapcore.Level `json:"level"` - } - - enc := json.NewEncoder(w) - - switch r.Method { - case http.MethodGet: - enc.Encode(payload{Level: lvl.Level()}) - case http.MethodPut: - requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: err.Error()}) - return - } - lvl.SetLevel(requestedLvl) - enc.Encode(payload{Level: lvl.Level()}) - default: - w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ - Error: "Only GET and PUT are supported.", - }) - } -} - -// Decodes incoming PUT requests and returns the requested logging level. -func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { - if contentType == "application/x-www-form-urlencoded" { - return decodePutURL(r) - } - return decodePutJSON(r.Body) -} - -func decodePutURL(r *http.Request) (zapcore.Level, error) { - lvl := r.FormValue("level") - if lvl == "" { - return 0, fmt.Errorf("must specify logging level") - } - var l zapcore.Level - if err := l.UnmarshalText([]byte(lvl)); err != nil { - return 0, err - } - return l, nil -} - -func decodePutJSON(body io.Reader) (zapcore.Level, error) { - var pld struct { - Level *zapcore.Level `json:"level"` - } - if err := json.NewDecoder(body).Decode(&pld); err != nil { - return 0, fmt.Errorf("malformed request body: %v", err) - } - if pld.Level == nil { - return 0, fmt.Errorf("must specify logging level") - } - return *pld.Level, nil - -} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go deleted file mode 100644 index dad583aa..00000000 --- a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package bufferpool houses zap's shared internal buffer pool. 
Third-party -// packages can recreate the same functionality with buffers.NewPool. -package bufferpool - -import "go.uber.org/zap/buffer" - -var ( - _pool = buffer.NewPool() - // Get retrieves a buffer from the pool, creating one if necessary. - Get = _pool.Get -) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go deleted file mode 100644 index c4d5d02a..00000000 --- a/vendor/go.uber.org/zap/internal/color/color.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package color adds coloring functionality for TTY output. -package color - -import "fmt" - -// Foreground colors. -const ( - Black Color = iota + 30 - Red - Green - Yellow - Blue - Magenta - Cyan - White -) - -// Color represents a text color. -type Color uint8 - -// Add adds the coloring to the given string. -func (c Color) Add(s string) string { - return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) -} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go deleted file mode 100644 index dfc5b05f..00000000 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package exit provides stubs so that unit tests can exercise code that calls -// os.Exit(1). 
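Because the package is internal, only code inside the zap module itself can import it. A sketch in the style of zap's own tests (the test name is invented for illustration):

    package zap

    import (
        "testing"

        "go.uber.org/zap/internal/exit"
    )

    // Fatal normally calls os.Exit(1) via exit.Exit; stubbing lets the
    // test observe that request without the process dying.
    func TestFatalRequestsExit(t *testing.T) {
        stub := exit.WithStub(func() {
            NewNop().Fatal("boom")
        })
        if !stub.Exited {
            t.Error("expected Fatal to request a process exit")
        }
    }
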
-package exit - -import "os" - -var real = func() { os.Exit(1) } - -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() -} - -// A StubbedExit is a testing fake for os.Exit. -type StubbedExit struct { - Exited bool - prev func() -} - -// Stub substitutes a fake for the call to os.Exit(1). -func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit - return s -} - -// WithStub runs the supplied function with Exit stubbed. It returns the stub -// used, so that users can test whether the process would have crashed. -func WithStub(f func()) *StubbedExit { - s := Stub() - defer s.Unstub() - f() - return s -} - -// Unstub restores the previous exit function. -func (se *StubbedExit) Unstub() { - real = se.prev -} - -func (se *StubbedExit) exit() { - se.Exited = true -} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go deleted file mode 100644 index 3567a9a1..00000000 --- a/vendor/go.uber.org/zap/level.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "go.uber.org/atomic" - "go.uber.org/zap/zapcore" -) - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel = zapcore.DebugLevel - // InfoLevel is the default logging priority. - InfoLevel = zapcore.InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel = zapcore.WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel = zapcore.ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel = zapcore.DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel = zapcore.PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel = zapcore.FatalLevel -) - -// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with -// an anonymous function. -// -// It's particularly useful when splitting log output between different -// outputs (e.g., standard error and standard out). For sample code, see the -// package-level AdvancedConfiguration example. 
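A condensed version of that output-splitting pattern, assuming JSON encoding on both streams:

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
            return lvl >= zapcore.ErrorLevel
        })
        lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
            return lvl < zapcore.ErrorLevel
        })

        enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
        core := zapcore.NewTee(
            zapcore.NewCore(enc, zapcore.Lock(os.Stderr), highPriority),
            zapcore.NewCore(enc, zapcore.Lock(os.Stdout), lowPriority),
        )

        logger := zap.New(core)
        defer logger.Sync()
        logger.Info("routed to stdout")
        logger.Error("routed to stderr")
    }
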
-type LevelEnablerFunc func(zapcore.Level) bool - -// Enabled calls the wrapped function. -func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } - -// An AtomicLevel is an atomically changeable, dynamic logging level. It lets -// you safely change the log level of a tree of loggers (the root logger and -// any children created by adding context) at runtime. -// -// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to -// alter its level. -// -// AtomicLevels must be created with the NewAtomicLevel constructor to allocate -// their internal atomic pointer. -type AtomicLevel struct { - l *atomic.Int32 -} - -// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging -// enabled. -func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } -} - -// NewAtomicLevelAt is a convenience function that creates an AtomicLevel -// and then calls SetLevel with the given level. -func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { - a := NewAtomicLevel() - a.SetLevel(l) - return a -} - -// Enabled implements the zapcore.LevelEnabler interface, which allows the -// AtomicLevel to be used in place of traditional static levels. -func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { - return lvl.Level().Enabled(l) -} - -// Level returns the minimum enabled log level. -func (lvl AtomicLevel) Level() zapcore.Level { - return zapcore.Level(int8(lvl.l.Load())) -} - -// SetLevel alters the logging level. -func (lvl AtomicLevel) SetLevel(l zapcore.Level) { - lvl.l.Store(int32(l)) -} - -// String returns the string representation of the underlying Level. -func (lvl AtomicLevel) String() string { - return lvl.Level().String() -} - -// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text -// representations as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl *AtomicLevel) UnmarshalText(text []byte) error { - if lvl.l == nil { - lvl.l = &atomic.Int32{} - } - - var l zapcore.Level - if err := l.UnmarshalText(text); err != nil { - return err - } - - lvl.SetLevel(l) - return nil -} - -// MarshalText marshals the AtomicLevel to a byte slice. It uses the same -// text representation as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl AtomicLevel) MarshalText() (text []byte, err error) { - return lvl.Level().MarshalText() -} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go deleted file mode 100644 index f116bd93..00000000 --- a/vendor/go.uber.org/zap/logger.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - - "go.uber.org/zap/zapcore" -) - -// A Logger provides fast, leveled, structured logging. All methods are safe -// for concurrent use. -// -// The Logger is designed for contexts in which every microsecond and every -// allocation matters, so its API intentionally favors performance and type -// safety over brevity. For most applications, the SugaredLogger strikes a -// better balance between performance and ergonomics. -type Logger struct { - core zapcore.Core - - development bool - addCaller bool - onFatal zapcore.CheckWriteAction // default is WriteThenFatal - - name string - errorOutput zapcore.WriteSyncer - - addStack zapcore.LevelEnabler - - callerSkip int - - clock zapcore.Clock -} - -// New constructs a new Logger from the provided zapcore.Core and Options. If -// the passed zapcore.Core is nil, it falls back to using a no-op -// implementation. -// -// This is the most flexible way to construct a Logger, but also the most -// verbose. For typical use cases, the highly-opinionated presets -// (NewProduction, NewDevelopment, and NewExample) or the Config struct are -// more convenient. -// -// For sample code, see the package-level AdvancedConfiguration example. -func New(core zapcore.Core, options ...Option) *Logger { - if core == nil { - return NewNop() - } - log := &Logger{ - core: core, - errorOutput: zapcore.Lock(os.Stderr), - addStack: zapcore.FatalLevel + 1, - clock: zapcore.DefaultClock, - } - return log.WithOptions(options...) -} - -// NewNop returns a no-op Logger. It never writes out logs or internal errors, -// and it never runs user-defined hooks. -// -// Using WithOptions to replace the Core or error output of a no-op Logger can -// re-enable logging. -func NewNop() *Logger { - return &Logger{ - core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), - addStack: zapcore.FatalLevel + 1, - clock: zapcore.DefaultClock, - } -} - -// NewProduction builds a sensible production Logger that writes InfoLevel and -// above logs to standard error as JSON. -// -// It's a shortcut for NewProductionConfig().Build(...Option). -func NewProduction(options ...Option) (*Logger, error) { - return NewProductionConfig().Build(options...) -} - -// NewDevelopment builds a development Logger that writes DebugLevel and above -// logs to standard error in a human-friendly format. -// -// It's a shortcut for NewDevelopmentConfig().Build(...Option). -func NewDevelopment(options ...Option) (*Logger, error) { - return NewDevelopmentConfig().Build(options...) -} - -// NewExample builds a Logger that's designed for use in zap's testable -// examples. It writes DebugLevel and above logs to standard out as JSON, but -// omits the timestamp and calling function to keep example output -// short and deterministic. -func NewExample(options ...Option) *Logger { - encoderCfg := zapcore.EncoderConfig{ - MessageKey: "msg", - LevelKey: "level", - NameKey: "logger", - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - } - core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) - return New(core).WithOptions(options...) 
-} - -// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, -// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a -// single application to use both Loggers and SugaredLoggers, converting -// between them on the boundaries of performance-sensitive code. -func (log *Logger) Sugar() *SugaredLogger { - core := log.clone() - core.callerSkip += 2 - return &SugaredLogger{core} -} - -// Named adds a new path segment to the logger's name. Segments are joined by -// periods. By default, Loggers are unnamed. -func (log *Logger) Named(s string) *Logger { - if s == "" { - return log - } - l := log.clone() - if log.name == "" { - l.name = s - } else { - l.name = strings.Join([]string{l.name, s}, ".") - } - return l -} - -// WithOptions clones the current Logger, applies the supplied Options, and -// returns the resulting Logger. It's safe to use concurrently. -func (log *Logger) WithOptions(opts ...Option) *Logger { - c := log.clone() - for _, opt := range opts { - opt.apply(c) - } - return c -} - -// With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. -func (log *Logger) With(fields ...Field) *Logger { - if len(fields) == 0 { - return log - } - l := log.clone() - l.core = l.core.With(fields) - return l -} - -// Check returns a CheckedEntry if logging a message at the specified level -// is enabled. It's a completely optional optimization; in high-performance -// applications, Check can help avoid allocating a slice to hold fields. -func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - return log.check(lvl, msg) -} - -// Debug logs a message at DebugLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Debug(msg string, fields ...Field) { - if ce := log.check(DebugLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Info logs a message at InfoLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Info(msg string, fields ...Field) { - if ce := log.check(InfoLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Warn logs a message at WarnLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Warn(msg string, fields ...Field) { - if ce := log.check(WarnLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Error logs a message at ErrorLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Error(msg string, fields ...Field) { - if ce := log.check(ErrorLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// DPanic logs a message at DPanicLevel. The message includes any fields -// passed at the log site, as well as any fields accumulated on the logger. -// -// If the logger is in development mode, it then panics (DPanic means -// "development panic"). This is useful for catching errors that are -// recoverable, but shouldn't ever happen. -func (log *Logger) DPanic(msg string, fields ...Field) { - if ce := log.check(DPanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Panic logs a message at PanicLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then panics, even if logging at PanicLevel is disabled. 
-func (log *Logger) Panic(msg string, fields ...Field) { - if ce := log.check(PanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Fatal logs a message at FatalLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then calls os.Exit(1), even if logging at FatalLevel is -// disabled. -func (log *Logger) Fatal(msg string, fields ...Field) { - if ce := log.check(FatalLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Sync calls the underlying Core's Sync method, flushing any buffered log -// entries. Applications should take care to call Sync before exiting. -func (log *Logger) Sync() error { - return log.core.Sync() -} - -// Core returns the Logger's underlying zapcore.Core. -func (log *Logger) Core() zapcore.Core { - return log.core -} - -func (log *Logger) clone() *Logger { - copy := *log - return &copy -} - -func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). - const callerSkipOffset = 2 - - // Check the level first to reduce the cost of disabled log calls. - // Since Panic and higher may exit, we skip the optimization for those levels. - if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { - return nil - } - - // Create basic checked entry thru the core; this will be non-nil if the - // log message will actually be written somewhere. - ent := zapcore.Entry{ - LoggerName: log.name, - Time: log.clock.Now(), - Level: lvl, - Message: msg, - } - ce := log.core.Check(ent, nil) - willWrite := ce != nil - - // Set up any required terminal behavior. - switch ent.Level { - case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) - case zapcore.FatalLevel: - onFatal := log.onFatal - // Noop is the default value for CheckWriteAction, and it leads to - // continued execution after a Fatal, which is unexpected. - if onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.Should(ent, onFatal) - case zapcore.DPanicLevel: - if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) - } - } - - // Only do further annotation if we're going to write this message; checked - // entries that exist only for terminal behavior don't benefit from - // annotation. - if !willWrite { - return ce - } - - // Thread the error output through to the CheckedEntry. - ce.ErrorOutput = log.errorOutput - if log.addCaller { - frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) - if !defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) - log.errorOutput.Sync() - } - - ce.Entry.Caller = zapcore.EntryCaller{ - Defined: defined, - PC: frame.PC, - File: frame.File, - Line: frame.Line, - Function: frame.Function, - } - } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String - } - - return ce -} - -// getCallerFrame gets the caller frame. The argument skip is the number of stack -// frames to ascend, with 0 identifying the caller of getCallerFrame. The -// boolean ok is false if it was not possible to recover the information. -// -// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { - const skipOffset = 2 // skip getCallerFrame and Callers - - pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc) - if numFrames < 1 { - return - } - - frame, _ = runtime.CallersFrames(pc).Next() - return frame, frame.PC != 0 -} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go deleted file mode 100644 index e9e66161..00000000 --- a/vendor/go.uber.org/zap/options.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" -) - -// An Option configures a Logger. -type Option interface { - apply(*Logger) -} - -// optionFunc wraps a func so it satisfies the Option interface. -type optionFunc func(*Logger) - -func (f optionFunc) apply(log *Logger) { - f(log) -} - -// WrapCore wraps or replaces the Logger's underlying zapcore.Core. -func WrapCore(f func(zapcore.Core) zapcore.Core) Option { - return optionFunc(func(log *Logger) { - log.core = f(log.core) - }) -} - -// Hooks registers functions which will be called each time the Logger writes -// out an Entry. Repeated use of Hooks is additive. -// -// Hooks are useful for simple side effects, like capturing metrics for the -// number of emitted logs. More complex side effects, including anything that -// requires access to the Entry's structured fields, should be implemented as -// a zapcore.Core instead. See zapcore.RegisterHooks for details. -func Hooks(hooks ...func(zapcore.Entry) error) Option { - return optionFunc(func(log *Logger) { - log.core = zapcore.RegisterHooks(log.core, hooks...) - }) -} - -// Fields adds fields to the Logger. -func Fields(fs ...Field) Option { - return optionFunc(func(log *Logger) { - log.core = log.core.With(fs) - }) -} - -// ErrorOutput sets the destination for errors generated by the Logger. Note -// that this option only affects internal errors; for sample code that sends -// error-level logs to a different location from info- and debug-level logs, -// see the package-level AdvancedConfiguration example. -// -// The supplied WriteSyncer must be safe for concurrent use. The Open and -// zapcore.Lock functions are the simplest ways to protect files with a mutex. 
-func ErrorOutput(w zapcore.WriteSyncer) Option { - return optionFunc(func(log *Logger) { - log.errorOutput = w - }) -} - -// Development puts the logger in development mode, which makes DPanic-level -// logs panic instead of simply logging an error. -func Development() Option { - return optionFunc(func(log *Logger) { - log.development = true - }) -} - -// AddCaller configures the Logger to annotate each message with the filename, -// line number, and function name of zap's caller. See also WithCaller. -func AddCaller() Option { - return WithCaller(true) -} - -// WithCaller configures the Logger to annotate each message with the filename, -// line number, and function name of zap's caller, or not, depending on the -// value of enabled. This is a generalized form of AddCaller. -func WithCaller(enabled bool) Option { - return optionFunc(func(log *Logger) { - log.addCaller = enabled - }) -} - -// AddCallerSkip increases the number of callers skipped by caller annotation -// (as enabled by the AddCaller option). When building wrappers around the -// Logger and SugaredLogger, supplying this Option prevents zap from always -// reporting the wrapper code as the caller. -func AddCallerSkip(skip int) Option { - return optionFunc(func(log *Logger) { - log.callerSkip += skip - }) -} - -// AddStacktrace configures the Logger to record a stack trace for all messages at -// or above a given level. -func AddStacktrace(lvl zapcore.LevelEnabler) Option { - return optionFunc(func(log *Logger) { - log.addStack = lvl - }) -} - -// IncreaseLevel increases the level of the logger. It has no effect if -// the passed-in level tries to decrease the level of the logger. -func IncreaseLevel(lvl zapcore.LevelEnabler) Option { - return optionFunc(func(log *Logger) { - core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) - if err != nil { - fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) - } else { - log.core = core - } - }) -} - -// OnFatal sets the action to take on fatal logs. -func OnFatal(action zapcore.CheckWriteAction) Option { - return optionFunc(func(log *Logger) { - log.onFatal = action - }) -} - -// WithClock specifies the clock used by the logger to determine the current -// time for logged entries. Defaults to the system clock with time.Now. -func WithClock(clock zapcore.Clock) Option { - return optionFunc(func(log *Logger) { - log.clock = clock - }) -} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go deleted file mode 100644 index df46fa87..00000000 --- a/vendor/go.uber.org/zap/sink.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "strings" - "sync" - - "go.uber.org/zap/zapcore" -) - -const schemeFile = "file" - -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} - -// Sink defines the interface to write to and close logger destinations. -type Sink interface { - zapcore.WriteSyncer - io.Closer -} - -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - -type errSinkNotFound struct { - scheme string -} - -func (e *errSinkNotFound) Error() string { - return fmt.Sprintf("no sink found for scheme %q", e.scheme) -} - -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. -func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - if scheme == "" { - return errors.New("can't register a sink factory for empty string") - } - normalized, err := normalizeScheme(scheme) - if err != nil { - return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) - } - if _, ok := _sinkFactories[normalized]; ok { - return fmt.Errorf("sink factory already registered for scheme %q", normalized) - } - _sinkFactories[normalized] = factory - return nil -} - -func newSink(rawURL string) (Sink, error) { - u, err := url.Parse(rawURL) - if err != nil { - return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) - } - if u.Scheme == "" { - u.Scheme = schemeFile - } - - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() - if !ok { - return nil, &errSinkNotFound{u.Scheme} - } - return factory(u) -} - -func newFileSink(u *url.URL) (Sink, error) { - if u.User != nil { - return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) - } - if u.Fragment != "" { - return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) - } - if u.RawQuery != "" { - return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) - } - // Error messages are better if we check hostname and port separately. 
- if u.Port() != "" { - return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) - } - if hn := u.Hostname(); hn != "" && hn != "localhost" { - return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) - } - switch u.Path { - case "stdout": - return nopCloserSink{os.Stdout}, nil - case "stderr": - return nopCloserSink{os.Stderr}, nil - } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) -} - -func normalizeScheme(s string) (string, error) { - // https://tools.ietf.org/html/rfc3986#section-3.1 - s = strings.ToLower(s) - if first := s[0]; 'a' > first || 'z' < first { - return "", errors.New("must start with a letter") - } - for i := 1; i < len(s); i++ { // iterate over bytes, not runes - c := s[i] - switch { - case 'a' <= c && c <= 'z': - continue - case '0' <= c && c <= '9': - continue - case c == '.' || c == '+' || c == '-': - continue - } - return "", fmt.Errorf("may not contain %q", c) - } - return s, nil -} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go deleted file mode 100644 index 0cf8c1dd..00000000 --- a/vendor/go.uber.org/zap/stacktrace.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "runtime" - "sync" - - "go.uber.org/zap/internal/bufferpool" -) - -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } -) - -func takeStacktrace(skip int) string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Callers and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(skip+2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) - } - - i := 0 - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) - - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() { - if i != 0 { - buffer.AppendByte('\n') - } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) - } - - return buffer.String() -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go deleted file mode 100644 index 0b965198..00000000 --- a/vendor/go.uber.org/zap/sugar.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -const ( - _oddNumberErrMsg = "Ignored key without a value." - _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." -) - -// A SugaredLogger wraps the base Logger functionality in a slower, but less -// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar -// method. -// -// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. -type SugaredLogger struct { - base *Logger -} - -// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring -// is quite inexpensive, so it's reasonable for a single application to use -// both Loggers and SugaredLoggers, converting between them on the boundaries -// of performance-sensitive code. -func (s *SugaredLogger) Desugar() *Logger { - base := s.base.clone() - base.callerSkip -= 2 - return base -} - -// Named adds a sub-scope to the logger's name. See Logger.Named for details. -func (s *SugaredLogger) Named(name string) *SugaredLogger { - return &SugaredLogger{base: s.base.Named(name)} -} - -// With adds a variadic number of fields to the logging context. It accepts a -// mix of strongly-typed Field objects and loosely-typed key-value pairs. When -// processing pairs, the first element of the pair is used as the field key -// and the second as the field value. 
-// -// For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) -// is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) -// -// Note that the keys in key-value pairs should be strings. In development, -// passing a non-string key panics. In production, the logger is more -// forgiving: a separate error is logged, but the key-value pair is skipped -// and execution continues. Passing an orphaned key triggers similar behavior: -// panics in development and errors in production. -func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { - return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} -} - -// Debug uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Debug(args ...interface{}) { - s.log(DebugLevel, "", args, nil) -} - -// Info uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Info(args ...interface{}) { - s.log(InfoLevel, "", args, nil) -} - -// Warn uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Warn(args ...interface{}) { - s.log(WarnLevel, "", args, nil) -} - -// Error uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Error(args ...interface{}) { - s.log(ErrorLevel, "", args, nil) -} - -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanic(args ...interface{}) { - s.log(DPanicLevel, "", args, nil) -} - -// Panic uses fmt.Sprint to construct and log a message, then panics. -func (s *SugaredLogger) Panic(args ...interface{}) { - s.log(PanicLevel, "", args, nil) -} - -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. -func (s *SugaredLogger) Fatal(args ...interface{}) { - s.log(FatalLevel, "", args, nil) -} - -// Debugf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Debugf(template string, args ...interface{}) { - s.log(DebugLevel, template, args, nil) -} - -// Infof uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Infof(template string, args ...interface{}) { - s.log(InfoLevel, template, args, nil) -} - -// Warnf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Warnf(template string, args ...interface{}) { - s.log(WarnLevel, template, args, nil) -} - -// Errorf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Errorf(template string, args ...interface{}) { - s.log(ErrorLevel, template, args, nil) -} - -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { - s.log(DPanicLevel, template, args, nil) -} - -// Panicf uses fmt.Sprintf to log a templated message, then panics. -func (s *SugaredLogger) Panicf(template string, args ...interface{}) { - s.log(PanicLevel, template, args, nil) -} - -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. -func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { - s.log(FatalLevel, template, args, nil) -} - -// Debugw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. 
-// -// When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) -func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { - s.log(DebugLevel, msg, nil, keysAndValues) -} - -// Infow logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { - s.log(InfoLevel, msg, nil, keysAndValues) -} - -// Warnw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { - s.log(WarnLevel, msg, nil, keysAndValues) -} - -// Errorw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { - s.log(ErrorLevel, msg, nil, keysAndValues) -} - -// DPanicw logs a message with some additional context. In development, the -// logger then panics. (See DPanicLevel for details.) The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { - s.log(DPanicLevel, msg, nil, keysAndValues) -} - -// Panicw logs a message with some additional context, then panics. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { - s.log(PanicLevel, msg, nil, keysAndValues) -} - -// Fatalw logs a message with some additional context, then calls os.Exit. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { - s.log(FatalLevel, msg, nil, keysAndValues) -} - -// Sync flushes any buffered log entries. -func (s *SugaredLogger) Sync() error { - return s.base.Sync() -} - -func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { - // If logging at this level is completely disabled, skip the overhead of - // string formatting. - if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { - return - } - - msg := getMessage(template, fmtArgs) - if ce := s.base.Check(lvl, msg); ce != nil { - ce.Write(s.sweetenFields(context)...) - } -} - -// getMessage formats a message with Sprint, Sprintf, or neither. -func getMessage(template string, fmtArgs []interface{}) string { - if len(fmtArgs) == 0 { - return template - } - - if template != "" { - return fmt.Sprintf(template, fmtArgs...) - } - - if len(fmtArgs) == 1 { - if str, ok := fmtArgs[0].(string); ok { - return str - } - } - return fmt.Sprint(fmtArgs...) -} - -func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { - if len(args) == 0 { - return nil - } - - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs - - for i := 0; i < len(args); { - // This is a strongly-typed field. Consume it and move on. - if f, ok := args[i].(Field); ok { - fields = append(fields, f) - i++ - continue - } - - // Make sure this element isn't a dangling key. - if i == len(args)-1 { - s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) - break - } - - // Consume this value and the next, treating them as a key-value pair. If the - // key isn't a string, add this pair to the slice of invalid pairs.
- key, val := args[i], args[i+1] - if keyStr, ok := key.(string); !ok { - // Subsequent errors are likely, so allocate once up front. - if cap(invalid) == 0 { - invalid = make(invalidPairs, 0, len(args)/2) - } - invalid = append(invalid, invalidPair{i, key, val}) - } else { - fields = append(fields, Any(keyStr, val)) - } - i += 2 - } - - // If we encountered any invalid key-value pairs, log an error. - if len(invalid) > 0 { - s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) - } - return fields -} - -type invalidPair struct { - position int - key, value interface{} -} - -func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddInt64("position", int64(p.position)) - Any("key", p.key).AddTo(enc) - Any("value", p.value).AddTo(enc) - return nil -} - -type invalidPairs []invalidPair - -func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { - var err error - for i := range ps { - err = multierr.Append(err, enc.AppendObject(ps[i])) - } - return err -} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go deleted file mode 100644 index c5a1f162..00000000 --- a/vendor/go.uber.org/zap/time.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "time" - -func timeToMillis(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond) -} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go deleted file mode 100644 index 86a709ab..00000000 --- a/vendor/go.uber.org/zap/writer.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io" - "io/ioutil" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -// Open is a high-level wrapper that takes a variadic number of URLs, opens or -// creates each of the specified resources, and combines them into a locked -// WriteSyncer. It also returns any error encountered and a function to close -// any opened files. -// -// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a -// scheme and URLs with the "file" scheme. Third-party code may register -// factories for other schemes using RegisterSink. -// -// URLs with the "file" scheme must use absolute paths on the local -// filesystem. No user, password, port, fragments, or query parameters are -// allowed, and the hostname must be empty or "localhost". -// -// Since it's common to write logs to the local filesystem, URLs without a -// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without -// a scheme, the special paths "stdout" and "stderr" are interpreted as -// os.Stdout and os.Stderr. When specified without a scheme, relative file -// paths also work. -func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) - if err != nil { - return nil, nil, err - } - - writer := CombineWriteSyncers(writers...) - return writer, close, nil -} - -func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - writers := make([]zapcore.WriteSyncer, 0, len(paths)) - closers := make([]io.Closer, 0, len(paths)) - close := func() { - for _, c := range closers { - c.Close() - } - } - - var openErr error - for _, path := range paths { - sink, err := newSink(path) - if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) - continue - } - writers = append(writers, sink) - closers = append(closers, sink) - } - if openErr != nil { - close() - return writers, nil, openErr - } - - return writers, close, nil -} - -// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a -// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op -// WriteSyncer. -// -// It's provided purely as a convenience; the result is no different from -// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. -func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { - if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) - } - return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) -} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go deleted file mode 100644 index ef2f7d96..00000000 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bufio" - "sync" - "time" - - "go.uber.org/multierr" -) - -const ( - // _defaultBufferSize specifies the default size used by Buffer. - _defaultBufferSize = 256 * 1024 // 256 kB - - // _defaultFlushInterval specifies the default flush interval for - // Buffer. - _defaultFlushInterval = 30 * time.Second -) - -// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before -// flushing them to a wrapped WriteSyncer after reaching some limit, or at some -// fixed interval--whichever comes first. -// -// BufferedWriteSyncer is safe for concurrent use. You don't need to use -// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. -type BufferedWriteSyncer struct { - // WS is the WriteSyncer around which BufferedWriteSyncer will buffer - // writes. - // - // This field is required. - WS WriteSyncer - - // Size specifies the maximum amount of data the writer will buffer - // before flushing. - // - // Defaults to 256 kB if unspecified. - Size int - - // FlushInterval specifies how often the writer should flush data if - // there have been no writes. - // - // Defaults to 30 seconds if unspecified. - FlushInterval time.Duration - - // Clock, if specified, provides control of the source of time for the - // writer. - // - // Defaults to the system clock. - Clock Clock - - // unexported fields for state - mu sync.Mutex - initialized bool // whether initialize() has run - stopped bool // whether Stop() has run - writer *bufio.Writer - ticker *time.Ticker - stop chan struct{} // closed when flushLoop should stop - done chan struct{} // closed when flushLoop has stopped -} - -func (s *BufferedWriteSyncer) initialize() { - size := s.Size - if size == 0 { - size = _defaultBufferSize - } - - flushInterval := s.FlushInterval - if flushInterval == 0 { - flushInterval = _defaultFlushInterval - } - - if s.Clock == nil { - s.Clock = DefaultClock - } - - s.ticker = s.Clock.NewTicker(flushInterval) - s.writer = bufio.NewWriterSize(s.WS, size) - s.stop = make(chan struct{}) - s.done = make(chan struct{}) - s.initialized = true - go s.flushLoop() -} - -// Write writes log data into the buffer syncer directly; multiple Write calls will be batched, -// and log data will be flushed to disk when the buffer is full or periodically.
-func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if !s.initialized { - s.initialize() - } - - // To avoid partial writes from being flushed, we manually flush the existing buffer if: - // * The current write doesn't fit into the buffer fully, and - // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) - if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { - if err := s.writer.Flush(); err != nil { - return 0, err - } - } - - return s.writer.Write(bs) -} - -// Sync flushes buffered log data into disk directly. -func (s *BufferedWriteSyncer) Sync() error { - s.mu.Lock() - defer s.mu.Unlock() - - var err error - if s.initialized { - err = s.writer.Flush() - } - - return multierr.Append(err, s.WS.Sync()) -} - -// flushLoop flushes the buffer at the configured interval until Stop is -// called. -func (s *BufferedWriteSyncer) flushLoop() { - defer close(s.done) - - for { - select { - case <-s.ticker.C: - // we just simply ignore error here - // because the underlying bufio writer stores any errors - // and we return any error from Sync() as part of the close - _ = s.Sync() - case <-s.stop: - return - } - } -} - -// Stop closes the buffer, cleans up background goroutines, and flushes -// remaining unwritten data. -func (s *BufferedWriteSyncer) Stop() (err error) { - var stopped bool - - // Critical section. - func() { - s.mu.Lock() - defer s.mu.Unlock() - - if !s.initialized { - return - } - - stopped = s.stopped - if stopped { - return - } - s.stopped = true - - s.ticker.Stop() - close(s.stop) // tell flushLoop to stop - <-s.done // and wait until it has - }() - - // Don't call Sync on consecutive Stops. - if !stopped { - err = s.Sync() - } - - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go deleted file mode 100644 index d2ea95b3..00000000 --- a/vendor/go.uber.org/zap/zapcore/clock.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" -) - -// DefaultClock is the default clock used by Zap in operations that require -// time. This clock uses the system clock for all operations. -var DefaultClock = systemClock{} - -// Clock is a source of time for logged entries. -type Clock interface { - // Now returns the current local time. 
- Now() time.Time - - // NewTicker returns *time.Ticker that holds a channel - // that delivers "ticks" of a clock. - NewTicker(time.Duration) *time.Ticker -} - -// systemClock implements default Clock that uses system time. -type systemClock struct{} - -func (systemClock) Now() time.Time { - return time.Now() -} - -func (systemClock) NewTicker(duration time.Duration) *time.Ticker { - return time.NewTicker(duration) -} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go deleted file mode 100644 index 2307af40..00000000 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} - -func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) -} - -func putSliceEncoder(e *sliceArrayEncoder) { - e.elems = e.elems[:0] - _sliceEncoderPool.Put(e) -} - -type consoleEncoder struct { - *jsonEncoder -} - -// NewConsoleEncoder creates an encoder whose output is designed for human - -// rather than machine - consumption. It serializes the core log entry data -// (message, level, timestamp, etc.) in a plain-text format and leaves the -// structured context as JSON. -// -// Note that although the console encoder doesn't use the keys specified in the -// encoder configuration, it will omit any element whose key is set to the empty -// string. -func NewConsoleEncoder(cfg EncoderConfig) Encoder { - if cfg.ConsoleSeparator == "" { - // Use a default delimiter of '\t' for backwards compatibility - cfg.ConsoleSeparator = "\t" - } - return consoleEncoder{newJSONEncoder(cfg, true)} -} - -func (c consoleEncoder) Clone() Encoder { - return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} -} - -func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - line := bufferpool.Get() - - // We don't want the entry's metadata to be quoted and escaped (if it's - // encoded as strings), which means that we can't use the JSON encoder. The - // simplest option is to use the memory encoder and fmt.Fprint. 
- // - // If this ever becomes a performance bottleneck, we can implement - // ArrayEncoder for our plain-text format. - arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { - c.EncodeTime(ent.Time, arr) - } - if c.LevelKey != "" && c.EncodeLevel != nil { - c.EncodeLevel(ent.Level, arr) - } - if ent.LoggerName != "" && c.NameKey != "" { - nameEncoder := c.EncodeName - - if nameEncoder == nil { - // Fall back to FullNameEncoder for backward compatibility. - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, arr) - } - if ent.Caller.Defined { - if c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) - } - if c.FunctionKey != "" { - arr.AppendString(ent.Caller.Function) - } - } - for i := range arr.elems { - if i > 0 { - line.AppendString(c.ConsoleSeparator) - } - fmt.Fprint(line, arr.elems[i]) - } - putSliceEncoder(arr) - - // Add the message itself. - if c.MessageKey != "" { - c.addSeparatorIfNecessary(line) - line.AppendString(ent.Message) - } - - // Add any structured context. - c.writeContext(line, fields) - - // If there's no stacktrace key, honor that; this allows users to force - // single-line output. - if ent.Stack != "" && c.StacktraceKey != "" { - line.AppendByte('\n') - line.AppendString(ent.Stack) - } - - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } - return line, nil -} - -func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { - context := c.jsonEncoder.Clone().(*jsonEncoder) - defer func() { - // putJSONEncoder assumes the buffer is still used, but we write out the buffer so - // we can free it. - context.buf.Free() - putJSONEncoder(context) - }() - - addFields(context, extra) - context.closeOpenNamespaces() - if context.buf.Len() == 0 { - return - } - - c.addSeparatorIfNecessary(line) - line.AppendByte('{') - line.Write(context.buf.Bytes()) - line.AppendByte('}') -} - -func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { - if line.Len() > 0 { - line.AppendString(c.ConsoleSeparator) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go deleted file mode 100644 index a1ef8b03..00000000 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// Core is a minimal, fast logger interface. 
It's designed for library authors -// to wrap in a more user-friendly API. -type Core interface { - LevelEnabler - - // With adds structured context to the Core. - With([]Field) Core - // Check determines whether the supplied Entry should be logged (using the - // embedded LevelEnabler and possibly some extra logic). If the entry - // should be logged, the Core adds itself to the CheckedEntry and returns - // the result. - // - // Callers must use Check before calling Write. - Check(Entry, *CheckedEntry) *CheckedEntry - // Write serializes the Entry and any Fields supplied at the log site and - // writes them to their destination. - // - // If called, Write should always log the Entry and Fields; it should not - // replicate the logic of Check. - Write(Entry, []Field) error - // Sync flushes buffered logs (if any). - Sync() error -} - -type nopCore struct{} - -// NewNopCore returns a no-op Core. -func NewNopCore() Core { return nopCore{} } -func (nopCore) Enabled(Level) bool { return false } -func (n nopCore) With([]Field) Core { return n } -func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } -func (nopCore) Write(Entry, []Field) error { return nil } -func (nopCore) Sync() error { return nil } - -// NewCore creates a Core that writes logs to a WriteSyncer. -func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { - return &ioCore{ - LevelEnabler: enab, - enc: enc, - out: ws, - } -} - -type ioCore struct { - LevelEnabler - enc Encoder - out WriteSyncer -} - -func (c *ioCore) With(fields []Field) Core { - clone := c.clone() - addFields(clone.enc, fields) - return clone -} - -func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if c.Enabled(ent.Level) { - return ce.AddCore(ent, c) - } - return ce -} - -func (c *ioCore) Write(ent Entry, fields []Field) error { - buf, err := c.enc.EncodeEntry(ent, fields) - if err != nil { - return err - } - _, err = c.out.Write(buf.Bytes()) - buf.Free() - if err != nil { - return err - } - if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() - } - return nil -} - -func (c *ioCore) Sync() error { - return c.out.Sync() -} - -func (c *ioCore) clone() *ioCore { - return &ioCore{ - LevelEnabler: c.LevelEnabler, - enc: c.enc.Clone(), - out: c.out, - } -} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go deleted file mode 100644 index 31000e91..00000000 --- a/vendor/go.uber.org/zap/zapcore/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zapcore defines and implements the low-level interfaces upon which -// zap is built. By providing alternate implementations of these interfaces, -// external packages can extend zap's capabilities. -package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go deleted file mode 100644 index 6601ca16..00000000 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/json" - "time" - - "go.uber.org/zap/buffer" -) - -// DefaultLineEnding defines the default line ending when writing logs. -// Alternate line endings specified in EncoderConfig can override this -// behavior. -const DefaultLineEnding = "\n" - -// OmitKey defines the key to use when callers want to remove a key from log output. -const OmitKey = "" - -// A LevelEncoder serializes a Level to a primitive type. -type LevelEncoder func(Level, PrimitiveArrayEncoder) - -// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, -// InfoLevel is serialized to "info". -func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.String()) -} - -// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. -// For example, InfoLevel is serialized to "info" and colored blue. -func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToLowercaseColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.String()) - } - enc.AppendString(s) -} - -// CapitalLevelEncoder serializes a Level to an all-caps string. For example, -// InfoLevel is serialized to "INFO". -func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.CapitalString()) -} - -// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. -// For example, InfoLevel is serialized to "INFO" and colored blue. 
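As the docs above describe, `LevelEncoder` is text-unmarshalable, which is how string config values map onto encoder functions. A small sketch under the same vendored API; `"capital"` is one of the cases handled by the switch shown below it:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// LevelEncoder implements encoding.TextUnmarshaler: "capital"
	// selects CapitalLevelEncoder; unrecognized values fall back to
	// LowercaseLevelEncoder. The returned error is always nil here.
	var enc zapcore.LevelEncoder
	_ = enc.UnmarshalText([]byte("capital"))

	cfg := zapcore.EncoderConfig{LevelKey: "level", EncodeLevel: enc}
	fmt.Println(cfg.LevelKey)
}
```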
-func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToCapitalColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.CapitalString()) - } - enc.AppendString(s) -} - -// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to -// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, -// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else -// is unmarshaled to LowercaseLevelEncoder. -func (e *LevelEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "capital": - *e = CapitalLevelEncoder - case "capitalColor": - *e = CapitalColorLevelEncoder - case "color": - *e = LowercaseColorLevelEncoder - default: - *e = LowercaseLevelEncoder - } - return nil -} - -// A TimeEncoder serializes a time.Time to a primitive type. -type TimeEncoder func(time.Time, PrimitiveArrayEncoder) - -// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds -// since the Unix epoch. -func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - sec := float64(nanos) / float64(time.Second) - enc.AppendFloat64(sec) -} - -// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of -// milliseconds since the Unix epoch. -func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - millis := float64(nanos) / float64(time.Millisecond) - enc.AppendFloat64(millis) -} - -// EpochNanosTimeEncoder serializes a time.Time to an integer number of -// nanoseconds since the Unix epoch. -func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendInt64(t.UnixNano()) -} - -func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { - type appendTimeEncoder interface { - AppendTimeLayout(time.Time, string) - } - - if enc, ok := enc.(appendTimeEncoder); ok { - enc.AppendTimeLayout(t, layout) - return - } - - enc.AppendString(t.Format(layout)) -} - -// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string -// with millisecond precision. -// -// If enc supports AppendTimeLayout(t time.Time,layout string), it's used -// instead of appending a pre-formatted string value. -func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) -} - -// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. -// -// If enc supports AppendTimeLayout(t time.Time,layout string), it's used -// instead of appending a pre-formatted string value. -func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - encodeTimeLayout(t, time.RFC3339, enc) -} - -// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string -// with nanosecond precision. -// -// If enc supports AppendTimeLayout(t time.Time,layout string), it's used -// instead of appending a pre-formatted string value. -func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - encodeTimeLayout(t, time.RFC3339Nano, enc) -} - -// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using -// given layout. -func TimeEncoderOfLayout(layout string) TimeEncoder { - return func(t time.Time, enc PrimitiveArrayEncoder) { - encodeTimeLayout(t, layout, enc) - } -} - -// UnmarshalText unmarshals text to a TimeEncoder. -// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. -// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. 
-// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. -// "millis" is unmarshaled to EpochMillisTimeEncoder. -// "nanos" is unmarshaled to EpochNanosEncoder. -// Anything else is unmarshaled to EpochTimeEncoder. -func (e *TimeEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "rfc3339nano", "RFC3339Nano": - *e = RFC3339NanoTimeEncoder - case "rfc3339", "RFC3339": - *e = RFC3339TimeEncoder - case "iso8601", "ISO8601": - *e = ISO8601TimeEncoder - case "millis": - *e = EpochMillisTimeEncoder - case "nanos": - *e = EpochNanosTimeEncoder - default: - *e = EpochTimeEncoder - } - return nil -} - -// UnmarshalYAML unmarshals YAML to a TimeEncoder. -// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. -// timeEncoder: -// layout: 06/01/02 03:04pm -// If value is string, it uses UnmarshalText. -// timeEncoder: iso8601 -func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { - var o struct { - Layout string `json:"layout" yaml:"layout"` - } - if err := unmarshal(&o); err == nil { - *e = TimeEncoderOfLayout(o.Layout) - return nil - } - - var s string - if err := unmarshal(&s); err != nil { - return err - } - return e.UnmarshalText([]byte(s)) -} - -// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. -func (e *TimeEncoder) UnmarshalJSON(data []byte) error { - return e.UnmarshalYAML(func(v interface{}) error { - return json.Unmarshal(data, v) - }) -} - -// A DurationEncoder serializes a time.Duration to a primitive type. -type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) - -// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. -func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendFloat64(float64(d) / float64(time.Second)) -} - -// NanosDurationEncoder serializes a time.Duration to an integer number of -// nanoseconds elapsed. -func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(int64(d)) -} - -// MillisDurationEncoder serializes a time.Duration to an integer number of -// milliseconds elapsed. -func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(d.Nanoseconds() / 1e6) -} - -// StringDurationEncoder serializes a time.Duration using its built-in String -// method. -func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendString(d.String()) -} - -// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled -// to StringDurationEncoder, and anything else is unmarshaled to -// NanosDurationEncoder. -func (e *DurationEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "string": - *e = StringDurationEncoder - case "nanos": - *e = NanosDurationEncoder - case "ms": - *e = MillisDurationEncoder - default: - *e = SecondsDurationEncoder - } - return nil -} - -// A CallerEncoder serializes an EntryCaller to a primitive type. -type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) - -// FullCallerEncoder serializes a caller in /full/path/to/package/file:line -// format. -func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.String()) -} - -// ShortCallerEncoder serializes a caller in package/file:line format, trimming -// all but the final directory from the full path. 
-func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.TrimmedPath()) -} - -// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to -// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. -func (e *CallerEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullCallerEncoder - default: - *e = ShortCallerEncoder - } - return nil -} - -// A NameEncoder serializes a period-separated logger name to a primitive -// type. -type NameEncoder func(string, PrimitiveArrayEncoder) - -// FullNameEncoder serializes the logger name as-is. -func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { - enc.AppendString(loggerName) -} - -// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is -// unmarshaled to FullNameEncoder. -func (e *NameEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullNameEncoder - default: - *e = FullNameEncoder - } - return nil -} - -// An EncoderConfig allows users to configure the concrete encoders supplied by -// zapcore. -type EncoderConfig struct { - // Set the keys used for each log entry. If any key is empty, that portion - // of the entry is omitted. - MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - FunctionKey string `json:"functionKey" yaml:"functionKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` - // Configure the primitive representations of common complex types. For - // example, some users may want all time.Times serialized as floating-point - // seconds since epoch, while others may prefer ISO8601 strings. - EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` - // Unlike the other primitive type encoders, EncodeName is optional. The - // zero value falls back to FullNameEncoder. - EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` - // Configures the field separator used by the console encoder. Defaults - // to tab. - ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` -} - -// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a -// map- or struct-like object to the logging context. Like maps, ObjectEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ObjectEncoder interface { - // Logging-specific marshalers. - AddArray(key string, marshaler ArrayMarshaler) error - AddObject(key string, marshaler ObjectMarshaler) error - - // Built-in types. 
- AddBinary(key string, value []byte) // for arbitrary bytes - AddByteString(key string, value []byte) // for UTF-8 encoded bytes - AddBool(key string, value bool) - AddComplex128(key string, value complex128) - AddComplex64(key string, value complex64) - AddDuration(key string, value time.Duration) - AddFloat64(key string, value float64) - AddFloat32(key string, value float32) - AddInt(key string, value int) - AddInt64(key string, value int64) - AddInt32(key string, value int32) - AddInt16(key string, value int16) - AddInt8(key string, value int8) - AddString(key, value string) - AddTime(key string, value time.Time) - AddUint(key string, value uint) - AddUint64(key string, value uint64) - AddUint32(key string, value uint32) - AddUint16(key string, value uint16) - AddUint8(key string, value uint8) - AddUintptr(key string, value uintptr) - - // AddReflected uses reflection to serialize arbitrary objects, so it can be - // slow and allocation-heavy. - AddReflected(key string, value interface{}) error - // OpenNamespace opens an isolated namespace where all subsequent fields will - // be added. Applications can use namespaces to prevent key collisions when - // injecting loggers into sub-components or third-party libraries. - OpenNamespace(key string) -} - -// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding -// array-like objects to the logging context. Of note, it supports mixed-type -// arrays even though they aren't typical in Go. Like slices, ArrayEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ArrayEncoder interface { - // Built-in types. - PrimitiveArrayEncoder - - // Time-related types. - AppendDuration(time.Duration) - AppendTime(time.Time) - - // Logging-specific marshalers. - AppendArray(ArrayMarshaler) error - AppendObject(ObjectMarshaler) error - - // AppendReflected uses reflection to serialize arbitrary objects, so it's - // slow and allocation-heavy. - AppendReflected(value interface{}) error -} - -// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals -// only in Go's built-in types. It's included only so that Duration- and -// TimeEncoders cannot trigger infinite recursion. -type PrimitiveArrayEncoder interface { - // Built-in types. - AppendBool(bool) - AppendByteString([]byte) // for UTF-8 encoded bytes - AppendComplex128(complex128) - AppendComplex64(complex64) - AppendFloat64(float64) - AppendFloat32(float32) - AppendInt(int) - AppendInt64(int64) - AppendInt32(int32) - AppendInt16(int16) - AppendInt8(int8) - AppendString(string) - AppendUint(uint) - AppendUint64(uint64) - AppendUint32(uint32) - AppendUint16(uint16) - AppendUint8(uint8) - AppendUintptr(uintptr) -} - -// Encoder is a format-agnostic interface for all log entry marshalers. Since -// log encoders don't need to support the same wide range of use cases as -// general-purpose marshalers, it's possible to make them faster and -// lower-allocation. -// -// Implementations of the ObjectEncoder interface's methods can, of course, -// freely modify the receiver. However, the Clone and EncodeEntry methods will -// be called concurrently and shouldn't modify the receiver. -type Encoder interface { - ObjectEncoder - - // Clone copies the encoder, ensuring that adding fields to the copy doesn't - // affect the original. - Clone() Encoder - - // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. 
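The `ObjectEncoder` surface above is what user types see when they implement `zapcore.ObjectMarshaler`. A minimal sketch against the standard zap v1 API; the `user` type and its fields are invented for illustration:

```go
package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type; MarshalLogObject receives the
// strongly-typed ObjectEncoder and never touches reflection.
type user struct {
	name    string
	age     int
	created time.Time
}

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.name)
	enc.AddInt("age", u.age)
	enc.AddTime("created", u.created)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	logger.Info("new signup", zap.Object("user", user{"jane", 42, time.Now()}))
}
```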
Any fields that are empty, - // including fields on the `Entry` type, should be omitted. - EncodeEntry(Entry, []Field) (*buffer.Buffer, error) -} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go deleted file mode 100644 index 0885505b..00000000 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "runtime" - "strings" - "sync" - "time" - - "go.uber.org/zap/internal/bufferpool" - "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" -) - -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) - -func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) - ce.reset() - return ce -} - -func putCheckedEntry(ce *CheckedEntry) { - if ce == nil { - return - } - _cePool.Put(ce) -} - -// NewEntryCaller makes an EntryCaller from the return signature of -// runtime.Caller. -func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { - if !ok { - return EntryCaller{} - } - return EntryCaller{ - PC: pc, - File: file, - Line: line, - Defined: true, - } -} - -// EntryCaller represents the caller of a logging function. -type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int - Function string -} - -// String returns the full path and line number of the caller. -func (ec EntryCaller) String() string { - return ec.FullPath() -} - -// FullPath returns a /full/path/to/package/file:line description of the -// caller. -func (ec EntryCaller) FullPath() string { - if !ec.Defined { - return "undefined" - } - buf := bufferpool.Get() - buf.AppendString(ec.File) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// TrimmedPath returns a package/file:line description of the caller, -// preserving only the leaf directory name and file name. -func (ec EntryCaller) TrimmedPath() string { - if !ec.Defined { - return "undefined" - } - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. 
- // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - // Find the last separator. - // - idx := strings.LastIndexByte(ec.File, '/') - if idx == -1 { - return ec.FullPath() - } - // Find the penultimate separator. - idx = strings.LastIndexByte(ec.File[:idx], '/') - if idx == -1 { - return ec.FullPath() - } - buf := bufferpool.Get() - // Keep everything after the penultimate separator. - buf.AppendString(ec.File[idx+1:]) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// An Entry represents a complete log message. The entry's structured context -// is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. Any fields left -// empty will be omitted when encoding. -// -// Entries are pooled, so any functions that accept them MUST be careful not to -// retain references to them. -type Entry struct { - Level Level - Time time.Time - LoggerName string - Message string - Caller EntryCaller - Stack string -} - -// CheckWriteAction indicates what action to take after a log entry is -// processed. Actions are ordered in increasing severity. -type CheckWriteAction uint8 - -const ( - // WriteThenNoop indicates that nothing special needs to be done. It's the - // default behavior. - WriteThenNoop CheckWriteAction = iota - // WriteThenGoexit runs runtime.Goexit after Write. - WriteThenGoexit - // WriteThenPanic causes a panic after Write. - WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. - WriteThenFatal -) - -// CheckedEntry is an Entry together with a collection of Cores that have -// already agreed to log it. -// -// CheckedEntry references should be created by calling AddCore or Should on a -// nil *CheckedEntry. References are returned to a pool after Write, and MUST -// NOT be retained after calling their Write method. -type CheckedEntry struct { - Entry - ErrorOutput WriteSyncer - dirty bool // best-effort detection of pool misuse - should CheckWriteAction - cores []Core -} - -func (ce *CheckedEntry) reset() { - ce.Entry = Entry{} - ce.ErrorOutput = nil - ce.dirty = false - ce.should = WriteThenNoop - for i := range ce.cores { - // don't keep references to cores - ce.cores[i] = nil - } - ce.cores = ce.cores[:0] -} - -// Write writes the entry to the stored Cores, returns any errors, and returns -// the CheckedEntry reference to a pool for immediate re-use. Finally, it -// executes any required CheckWriteAction. -func (ce *CheckedEntry) Write(fields ...Field) { - if ce == nil { - return - } - - if ce.dirty { - if ce.ErrorOutput != nil { - // Make a best effort to detect unsafe re-use of this CheckedEntry. - // If the entry is dirty, log an internal error; because the - // CheckedEntry is being used after it was returned to the pool, - // the message may be an amalgamation from multiple call sites. 
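The Check-then-Write protocol described above is usually hidden behind `*zap.Logger`, but a `Core` can be driven directly. A sketch under the vendored API; note the nil `*CheckedEntry` passed to `Check`, which `AddCore` handles by drawing a fresh entry from the pool:

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)

	// Check first; Write only through the returned CheckedEntry, and
	// never retain it afterwards (it goes back to the pool).
	ent := zapcore.Entry{Level: zapcore.InfoLevel, Time: time.Now(), Message: "direct core use"}
	if ce := core.Check(ent, nil); ce != nil {
		ce.Write(zap.String("source", "example"))
	}
}
```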
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) - ce.ErrorOutput.Sync() - } - return - } - ce.dirty = true - - var err error - for i := range ce.cores { - err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) - } - if err != nil && ce.ErrorOutput != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() - } - - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - case WriteThenGoexit: - runtime.Goexit() - } -} - -// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be -// used by Core.Check implementations, and is safe to call on nil CheckedEntry -// references. -func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.cores = append(ce.cores, core) - return ce -} - -// Should sets this CheckedEntry's CheckWriteAction, which controls whether a -// Core will panic or fatal after writing this log entry. Like AddCore, it's -// safe to call on nil CheckedEntry references. -func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.should = should - return ce -} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go deleted file mode 100644 index 74919b0c..00000000 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "reflect" - "sync" -) - -// Encodes the given error into fields of an object. A field with the given -// name is added for the error message. -// -// If the error implements fmt.Formatter, a field with the name ${key}Verbose -// is also added with the full verbose error message. -// -// Finally, if the error implements errorGroup (from go.uber.org/multierr) or -// causer (from github.com/pkg/errors), a ${key}Causes field is added with an -// array of objects containing the errors this error was comprised of. -// -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... 
-// ], -// } -func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { - // Try to capture panics (from nil references or otherwise) when calling - // the Error() method - defer func() { - if rerr := recover(); rerr != nil { - // If it's a nil pointer, just say "". The likeliest causes are a - // error that fails to guard against nil or a nil pointer for a - // value receiver, and in either case, "" is a nice result. - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - enc.AddString(key, "") - return - } - - retErr = fmt.Errorf("PANIC=%v", rerr) - } - }() - - basic := err.Error() - enc.AddString(key, basic) - - switch e := err.(type) { - case errorGroup: - return enc.AddArray(key+"Causes", errArray(e.Errors())) - case fmt.Formatter: - verbose := fmt.Sprintf("%+v", e) - if verbose != basic { - // This is a rich error type, like those produced by - // github.com/pkg/errors. - enc.AddString(key+"Verbose", verbose) - } - } - return nil -} - -type errorGroup interface { - // Provides read-only access to the underlying list of errors, preferably - // without causing any allocs. - Errors() []error -} - -// Note that errArray and errArrayElem are very similar to the version -// implemented in the top-level error.go file. We can't re-use this because -// that would require exporting errArray as part of the zapcore API. - -// Encodes a list of errors using the standard error encoding logic. -type errArray []error - -func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - - el := newErrArrayElem(errs[i]) - arr.AppendObject(el) - el.Free() - } - return nil -} - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Encodes any error into a {"error": ...} re-using the same errors logic. -// -// May be passed in place of an array to build a single-element array. -type errArrayElem struct{ err error } - -func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) - e.err = err - return e -} - -func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { - return arr.AppendObject(e) -} - -func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { - return encodeError("error", e.err, enc) -} - -func (e *errArrayElem) Free() { - e.err = nil - _errArrayElemPool.Put(e) -} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go deleted file mode 100644 index 95bdb0a1..00000000 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "fmt" - "math" - "reflect" - "time" -) - -// A FieldType indicates which member of the Field union struct should be used -// and how it should be serialized. -type FieldType uint8 - -const ( - // UnknownType is the default field type. Attempting to add it to an encoder will panic. - UnknownType FieldType = iota - // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. - ArrayMarshalerType - // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. - ObjectMarshalerType - // BinaryType indicates that the field carries an opaque binary blob. - BinaryType - // BoolType indicates that the field carries a bool. - BoolType - // ByteStringType indicates that the field carries UTF-8 encoded bytes. - ByteStringType - // Complex128Type indicates that the field carries a complex128. - Complex128Type - // Complex64Type indicates that the field carries a complex128. - Complex64Type - // DurationType indicates that the field carries a time.Duration. - DurationType - // Float64Type indicates that the field carries a float64. - Float64Type - // Float32Type indicates that the field carries a float32. - Float32Type - // Int64Type indicates that the field carries an int64. - Int64Type - // Int32Type indicates that the field carries an int32. - Int32Type - // Int16Type indicates that the field carries an int16. - Int16Type - // Int8Type indicates that the field carries an int8. - Int8Type - // StringType indicates that the field carries a string. - StringType - // TimeType indicates that the field carries a time.Time that is - // representable by a UnixNano() stored as an int64. - TimeType - // TimeFullType indicates that the field carries a time.Time stored as-is. - TimeFullType - // Uint64Type indicates that the field carries a uint64. - Uint64Type - // Uint32Type indicates that the field carries a uint32. - Uint32Type - // Uint16Type indicates that the field carries a uint16. - Uint16Type - // Uint8Type indicates that the field carries a uint8. - Uint8Type - // UintptrType indicates that the field carries a uintptr. - UintptrType - // ReflectType indicates that the field carries an interface{}, which should - // be serialized using reflection. - ReflectType - // NamespaceType signals the beginning of an isolated namespace. All - // subsequent fields should be added to the new namespace. - NamespaceType - // StringerType indicates that the field carries a fmt.Stringer. - StringerType - // ErrorType indicates that the field carries an error. - ErrorType - // SkipType indicates that the field is a no-op. - SkipType - - // InlineMarshalerType indicates that the field carries an ObjectMarshaler - // that should be inlined. - InlineMarshalerType -) - -// A Field is a marshaling operation used to add a key-value pair to a logger's -// context. Most fields are lazily marshaled, so it's inexpensive to add fields -// to disabled debug-level log statements. -type Field struct { - Key string - Type FieldType - Integer int64 - String string - Interface interface{} -} - -// AddTo exports a field through the ObjectEncoder interface. It's primarily -// useful to library authors, and shouldn't be necessary in most applications. 
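Because `Field` is a tagged union, constructing one is cheap and deferred: nothing is serialized until `AddTo` runs inside an encoder. A small sketch (vendored zap v1 API; the `elapsed` key is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Type selects the live member of the union; a Duration rides in
	// the Integer slot, so no allocation happens until encoding.
	f := zap.Duration("elapsed", 1500*time.Millisecond)
	fmt.Println(f.Key, f.Type == zapcore.DurationType, time.Duration(f.Integer))
	// elapsed true 1.5s
}
```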
-func (f Field) AddTo(enc ObjectEncoder) { - var err error - - switch f.Type { - case ArrayMarshalerType: - err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) - case ObjectMarshalerType: - err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) - case InlineMarshalerType: - err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) - case BinaryType: - enc.AddBinary(f.Key, f.Interface.([]byte)) - case BoolType: - enc.AddBool(f.Key, f.Integer == 1) - case ByteStringType: - enc.AddByteString(f.Key, f.Interface.([]byte)) - case Complex128Type: - enc.AddComplex128(f.Key, f.Interface.(complex128)) - case Complex64Type: - enc.AddComplex64(f.Key, f.Interface.(complex64)) - case DurationType: - enc.AddDuration(f.Key, time.Duration(f.Integer)) - case Float64Type: - enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) - case Float32Type: - enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) - case Int64Type: - enc.AddInt64(f.Key, f.Integer) - case Int32Type: - enc.AddInt32(f.Key, int32(f.Integer)) - case Int16Type: - enc.AddInt16(f.Key, int16(f.Integer)) - case Int8Type: - enc.AddInt8(f.Key, int8(f.Integer)) - case StringType: - enc.AddString(f.Key, f.String) - case TimeType: - if f.Interface != nil { - enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) - } else { - // Fall back to UTC if location is nil. - enc.AddTime(f.Key, time.Unix(0, f.Integer)) - } - case TimeFullType: - enc.AddTime(f.Key, f.Interface.(time.Time)) - case Uint64Type: - enc.AddUint64(f.Key, uint64(f.Integer)) - case Uint32Type: - enc.AddUint32(f.Key, uint32(f.Integer)) - case Uint16Type: - enc.AddUint16(f.Key, uint16(f.Integer)) - case Uint8Type: - enc.AddUint8(f.Key, uint8(f.Integer)) - case UintptrType: - enc.AddUintptr(f.Key, uintptr(f.Integer)) - case ReflectType: - err = enc.AddReflected(f.Key, f.Interface) - case NamespaceType: - enc.OpenNamespace(f.Key) - case StringerType: - err = encodeStringer(f.Key, f.Interface, enc) - case ErrorType: - err = encodeError(f.Key, f.Interface.(error), enc) - case SkipType: - break - default: - panic(fmt.Sprintf("unknown field type: %v", f)) - } - - if err != nil { - enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) - } -} - -// Equals returns whether two fields are equal. For non-primitive types such as -// errors, marshalers, or reflect types, it uses reflect.DeepEqual. -func (f Field) Equals(other Field) bool { - if f.Type != other.Type { - return false - } - if f.Key != other.Key { - return false - } - - switch f.Type { - case BinaryType, ByteStringType: - return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) - case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: - return reflect.DeepEqual(f.Interface, other.Interface) - default: - return f == other - } -} - -func addFields(enc ObjectEncoder, fields []Field) { - for i := range fields { - fields[i].AddTo(enc) - } -} - -func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { - // Try to capture panics (from nil references or otherwise) when calling - // the String() method, similar to https://golang.org/src/fmt/print.go#L540 - defer func() { - if err := recover(); err != nil { - // If it's a nil pointer, just say "". The likeliest causes are a - // Stringer that fails to guard against nil or a nil pointer for a - // value receiver, and in either case, "" is a nice result. 
- if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { - enc.AddString(key, "") - return - } - - retErr = fmt.Errorf("PANIC=%v", err) - } - }() - - enc.AddString(key, stringer.(fmt.Stringer).String()) - return nil -} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go deleted file mode 100644 index 5db4afb3..00000000 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type hooked struct { - Core - funcs []func(Entry) error -} - -// RegisterHooks wraps a Core and runs a collection of user-defined callback -// hooks each time a message is logged. Execution of the callbacks is blocking. -// -// This offers users an easy way to register simple callbacks (e.g., metrics -// collection) without implementing the full Core interface. -func RegisterHooks(core Core, hooks ...func(Entry) error) Core { - funcs := append([]func(Entry) error{}, hooks...) - return &hooked{ - Core: core, - funcs: funcs, - } -} - -func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - // Let the wrapped Core decide whether to log this message or not. This - // also gives the downstream a chance to register itself directly with the - // CheckedEntry. - if downstream := h.Core.Check(ent, ce); downstream != nil { - return downstream.AddCore(ent, h) - } - return ce -} - -func (h *hooked) With(fields []Field) Core { - return &hooked{ - Core: h.Core.With(fields), - funcs: h.funcs, - } -} - -func (h *hooked) Write(ent Entry, _ []Field) error { - // Since our downstream had a chance to register itself directly with the - // CheckedMessage, we don't need to call it here. - var err error - for i := range h.funcs { - err = multierr.Append(err, h.funcs[i](ent)) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go deleted file mode 100644 index 5a174926..00000000 --- a/vendor/go.uber.org/zap/zapcore/increase_level.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
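For context on the hook mechanism removed above: `RegisterHooks` wraps a `Core` and runs callbacks that see the `Entry` but not its fields, which suits simple metrics. A sketch of an error counter, assuming the vendored API:

```go
package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)

	// Hooks run synchronously on every logged entry.
	var errCount int
	hooked := zapcore.RegisterHooks(core, func(ent zapcore.Entry) error {
		if ent.Level >= zapcore.ErrorLevel {
			errCount++
		}
		return nil
	})

	logger := zap.New(hooked)
	logger.Error("boom")
	fmt.Println("errors seen:", errCount) // errors seen: 1
}
```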
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "fmt" - -type levelFilterCore struct { - core Core - level LevelEnabler -} - -// NewIncreaseLevelCore creates a core that can be used to increase the level of -// an existing Core. It cannot be used to decrease the logging level, as it acts -// as a filter before calling the underlying core. If level decreases the log level, -// an error is returned. -func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { - for l := _maxLevel; l >= _minLevel; l-- { - if !core.Enabled(l) && level.Enabled(l) { - return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) - } - } - - return &levelFilterCore{core, level}, nil -} - -func (c *levelFilterCore) Enabled(lvl Level) bool { - return c.level.Enabled(lvl) -} - -func (c *levelFilterCore) With(fields []Field) Core { - return &levelFilterCore{c.core.With(fields), c.level} -} - -func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !c.Enabled(ent.Level) { - return ce - } - - return c.core.Check(ent, ce) -} - -func (c *levelFilterCore) Write(ent Entry, fields []Field) error { - return c.core.Write(ent, fields) -} - -func (c *levelFilterCore) Sync() error { - return c.core.Sync() -} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go deleted file mode 100644 index af220d9b..00000000 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
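A sketch of the increase-level wrapper defined above: raising the threshold succeeds, while an enabler looser than the wrapped core's is rejected with the error constructed in `NewIncreaseLevelCore` (vendored zap v1 API, illustrative messages):

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stderr),
		zapcore.DebugLevel,
	)

	// Raising the threshold is allowed; lowering it would return the
	// "invalid increase level" error built in NewIncreaseLevelCore.
	warnOnly, err := zapcore.NewIncreaseLevelCore(base, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}
	logger := zap.New(warnOnly)
	logger.Debug("dropped by the wrapper")
	logger.Warn("still logged")
}
```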
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. -const _hex = "0123456789abcdef" - -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - if enc.reflectBuf != nil { - enc.reflectBuf.Free() - } - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - enc.reflectBuf = nil - enc.reflectEnc = nil - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *EncoderConfig - buf *buffer.Buffer - spaced bool // include spaces after colons and commas - openNamespaces int - - // for encoding generic values by reflection - reflectBuf *buffer.Buffer - reflectEnc *json.Encoder -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. -// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. -func NewJSONEncoder(cfg EncoderConfig) Encoder { - return newJSONEncoder(cfg, false) -} - -func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: bufferpool.Get(), - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddFloat32(key string, val float32) { - enc.addKey(key) - enc.AppendFloat32(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) resetReflectBuf() { - if enc.reflectBuf == nil { - enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - - // For consistency with our custom JSON encoder. 
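The duplicate-key caveat in the `NewJSONEncoder` docs above is easy to demonstrate. A sketch using `zap.NewExample`, which builds on this JSON encoder; the `id` key is illustrative:

```go
package main

import "go.uber.org/zap"

func main() {
	// Emits {"level":"info","msg":"dup keys","id":1,"id":2}: legal
	// JSON, but most decoders keep only the last pair, so callers
	// should avoid key collisions themselves.
	logger := zap.NewExample()
	logger.Info("dup keys", zap.Int("id", 1), zap.Int("id", 2))
}
```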
- enc.reflectEnc.SetEscapeHTML(false) - } else { - enc.reflectBuf.Reset() - } -} - -var nullLiteralBytes = []byte("null") - -// Only invoke the standard JSON encoder if there is actually something to -// encode; otherwise write JSON null literal directly. -func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { - if obj == nil { - return nullLiteralBytes, nil - } - enc.resetReflectBuf() - if err := enc.reflectEnc.Encode(obj); err != nil { - return nil, err - } - enc.reflectBuf.TrimNewline() - return enc.reflectBuf.Bytes(), nil -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - valueBytes, err := enc.encodeReflected(obj) - if err != nil { - return err - } - enc.addKey(key) - _, err = enc.buf.Write(valueBytes) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. - enc.buf.AppendFloat(r, 64) - // If imaginary part is less than 0, minus (-) sign is added by default - // by AppendFloat. - if i >= 0 { - enc.buf.AppendByte('+') - } - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - if e := enc.EncodeDuration; e != nil { - e(val, enc) - } - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. 
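`OpenNamespace` above is what `zap.Namespace` compiles down to: a `NamespaceType` field that nests every subsequent key. A short sketch, assuming the vendored API; the `req` namespace and its fields are illustrative:

```go
package main

import "go.uber.org/zap"

func main() {
	// zap.Namespace emits a NamespaceType field; the encoder opens a
	// nested object and every later field lands inside it.
	logger := zap.NewExample()
	logger.Info("request handled",
		zap.Namespace("req"),
		zap.String("method", "GET"),
		zap.Int("status", 200),
	)
	// {"level":"info","msg":"request handled","req":{"method":"GET","status":200}}
}
```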
- enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - valueBytes, err := enc.encodeReflected(val) - if err != nil { - return err - } - enc.addElementSeparator() - _, err = enc.buf.Write(valueBytes) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.buf.AppendTime(time, layout) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - if e := enc.EncodeTime; e != nil { - e(val, enc) - } - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. - enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() Encoder { - clone := enc.clone() - clone.buf.Write(enc.buf.Bytes()) - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - clone.openNamespaces = enc.openNamespaces - clone.buf = bufferpool.Get() - return clone -} - -func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - final := enc.clone() - final.buf.AppendByte('{') - - if final.LevelKey != "" { - final.addKey(final.LevelKey) - cur := final.buf.Len() - 
final.EncodeLevel(ent.Level, final) - if cur == final.buf.Len() { - // User-supplied EncodeLevel was a no-op. Fall back to strings to keep - // output JSON valid. - final.AppendString(ent.Level.String()) - } - } - if final.TimeKey != "" { - final.AddTime(final.TimeKey, ent.Time) - } - if ent.LoggerName != "" && final.NameKey != "" { - final.addKey(final.NameKey) - cur := final.buf.Len() - nameEncoder := final.EncodeName - - // if no name encoder provided, fall back to FullNameEncoder for backwards - // compatibility - if nameEncoder == nil { - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, final) - if cur == final.buf.Len() { - // User-supplied EncodeName was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.LoggerName) - } - } - if ent.Caller.Defined { - if final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) - } - } - if final.FunctionKey != "" { - final.addKey(final.FunctionKey) - final.AppendString(ent.Caller.Function) - } - } - if final.MessageKey != "" { - final.addKey(enc.MessageKey) - final.AppendString(ent.Message) - } - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } - - ret := final.buf - putJSONEncoder(final) - return ret, nil -} - -func (enc *jsonEncoder) truncate() { - enc.buf.Reset() -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. -func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } -} - -// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
-func (enc *jsonEncoder) safeAddByteString(s []byte) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false -} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go deleted file mode 100644 index e575c9f4..00000000 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "errors" - "fmt" -) - -var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") - -// A Level is a logging priority. Higher levels are more important. -type Level int8 - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel Level = iota - 1 - // InfoLevel is the default logging priority. - InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). 
- FatalLevel - - _minLevel = DebugLevel - _maxLevel = FatalLevel -) - -// String returns a lower-case ASCII representation of the log level. -func (l Level) String() string { - switch l { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warn" - case ErrorLevel: - return "error" - case DPanicLevel: - return "dpanic" - case PanicLevel: - return "panic" - case FatalLevel: - return "fatal" - default: - return fmt.Sprintf("Level(%d)", l) - } -} - -// CapitalString returns an all-caps ASCII representation of the log level. -func (l Level) CapitalString() string { - // Printing levels in all-caps is common enough that we should export this - // functionality. - switch l { - case DebugLevel: - return "DEBUG" - case InfoLevel: - return "INFO" - case WarnLevel: - return "WARN" - case ErrorLevel: - return "ERROR" - case DPanicLevel: - return "DPANIC" - case PanicLevel: - return "PANIC" - case FatalLevel: - return "FATAL" - default: - return fmt.Sprintf("LEVEL(%d)", l) - } -} - -// MarshalText marshals the Level to text. Note that the text representation -// drops the -Level suffix (see example). -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText -// expects the text representation of a Level to drop the -Level suffix (see -// example). -// -// In particular, this makes it easy to configure logging levels using YAML, -// TOML, or JSON files. -func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errUnmarshalNilLevel - } - if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { - return fmt.Errorf("unrecognized level: %q", text) - } - return nil -} - -func (l *Level) unmarshalText(text []byte) bool { - switch string(text) { - case "debug", "DEBUG": - *l = DebugLevel - case "info", "INFO", "": // make the zero value useful - *l = InfoLevel - case "warn", "WARN": - *l = WarnLevel - case "error", "ERROR": - *l = ErrorLevel - case "dpanic", "DPANIC": - *l = DPanicLevel - case "panic", "PANIC": - *l = PanicLevel - case "fatal", "FATAL": - *l = FatalLevel - default: - return false - } - return true -} - -// Set sets the level for the flag.Value interface. -func (l *Level) Set(s string) error { - return l.UnmarshalText([]byte(s)) -} - -// Get gets the level for the flag.Getter interface. -func (l *Level) Get() interface{} { - return *l -} - -// Enabled returns true if the given level is at or above this level. -func (l Level) Enabled(lvl Level) bool { - return lvl >= l -} - -// LevelEnabler decides whether a given logging level is enabled when logging a -// message. -// -// Enablers are intended to be used to implement deterministic filters; -// concerns like sampling are better implemented as a Core. -// -// Each concrete Level value implements a static LevelEnabler which returns -// true for itself and all higher logging levels. For example WarnLevel.Enabled() -// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and -// FatalLevel, but return false for InfoLevel and DebugLevel. -type LevelEnabler interface { - Enabled(Level) bool -} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go deleted file mode 100644 index 7af8dadc..00000000 --- a/vendor/go.uber.org/zap/zapcore/level_strings.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/zap/internal/color" - -var ( - _levelToColor = map[Level]color.Color{ - DebugLevel: color.Magenta, - InfoLevel: color.Blue, - WarnLevel: color.Yellow, - ErrorLevel: color.Red, - DPanicLevel: color.Red, - PanicLevel: color.Red, - FatalLevel: color.Red, - } - _unknownLevelColor = color.Red - - _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) - _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) -) - -func init() { - for level, color := range _levelToColor { - _levelToLowercaseColorString[level] = color.Add(level.String()) - _levelToCapitalColorString[level] = color.Add(level.CapitalString()) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go deleted file mode 100644 index c3c55ba0..00000000 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// ObjectMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -// -// Note: ObjectMarshaler is only used when zap.Object is used or when -// passed directly to zap.Any. It is not used when reflection-based -// encoding is used. 
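For context on how callers implement the marshaler interfaces described above, a minimal sketch; the User type, its fields, and the logger wiring are illustrative assumptions, not part of this diff:

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // User is an assumed example type; Password is deliberately never logged.
    type User struct {
        Name     string
        Password string
    }

    // MarshalLogObject implements zapcore.ObjectMarshaler, adding only the
    // fields that are safe to include in logs.
    func (u User) MarshalLogObject(enc zapcore.ObjectEncoder) error {
        enc.AddString("name", u.Name)
        return nil
    }

    func main() {
        logger, _ := zap.NewProduction()
        defer logger.Sync()
        logger.Info("login", zap.Object("user", User{Name: "alice", Password: "hunter2"}))
    }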
-type ObjectMarshaler interface { - MarshalLogObject(ObjectEncoder) error -} - -// ObjectMarshalerFunc is a type adapter that turns a function into an -// ObjectMarshaler. -type ObjectMarshalerFunc func(ObjectEncoder) error - -// MarshalLogObject calls the underlying function. -func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { - return f(enc) -} - -// ArrayMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -// -// Note: ArrayMarshaler is only used when zap.Array is used or when -// passed directly to zap.Any. It is not used when reflection-based -// encoding is used. -type ArrayMarshaler interface { - MarshalLogArray(ArrayEncoder) error -} - -// ArrayMarshalerFunc is a type adapter that turns a function into an -// ArrayMarshaler. -type ArrayMarshalerFunc func(ArrayEncoder) error - -// MarshalLogArray calls the underlying function. -func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { - return f(enc) -} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go deleted file mode 100644 index dfead082..00000000 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "time" - -// MapObjectEncoder is an ObjectEncoder backed by a simple -// map[string]interface{}. It's not fast enough for production use, but it's -// helpful in tests. -type MapObjectEncoder struct { - // Fields contains the entire encoded log context. - Fields map[string]interface{} - // cur is a pointer to the namespace we're currently writing to. - cur map[string]interface{} -} - -// NewMapObjectEncoder creates a new map-backed ObjectEncoder. -func NewMapObjectEncoder() *MapObjectEncoder { - m := make(map[string]interface{}) - return &MapObjectEncoder{ - Fields: m, - cur: m, - } -} - -// AddArray implements ObjectEncoder. -func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { - arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} - err := v.MarshalLogArray(arr) - m.cur[key] = arr.elems - return err -} - -// AddObject implements ObjectEncoder. 
-func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { - newMap := NewMapObjectEncoder() - m.cur[k] = newMap.Fields - return v.MarshalLogObject(newMap) -} - -// AddBinary implements ObjectEncoder. -func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } - -// AddByteString implements ObjectEncoder. -func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } - -// AddBool implements ObjectEncoder. -func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } - -// AddDuration implements ObjectEncoder. -func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } - -// AddComplex128 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } - -// AddComplex64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } - -// AddFloat64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } - -// AddFloat32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } - -// AddInt implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } - -// AddInt64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } - -// AddInt32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } - -// AddInt16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } - -// AddInt8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } - -// AddString implements ObjectEncoder. -func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } - -// AddTime implements ObjectEncoder. -func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } - -// AddUint implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } - -// AddUint64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } - -// AddUint32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } - -// AddUint16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } - -// AddUint8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } - -// AddUintptr implements ObjectEncoder. -func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } - -// AddReflected implements ObjectEncoder. -func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { - m.cur[k] = v - return nil -} - -// OpenNamespace implements ObjectEncoder. -func (m *MapObjectEncoder) OpenNamespace(k string) { - ns := make(map[string]interface{}) - m.cur[k] = ns - m.cur = ns -} - -// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like -// the MapObjectEncoder, it's not designed for production use. 
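Both in-memory encoders here are test helpers. A sketch of typical usage with the map-backed one, assuming zap's public field constructors (which this diff does not touch):

    package example

    import (
        "testing"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func TestFieldsLandInMap(t *testing.T) {
        enc := zapcore.NewMapObjectEncoder()
        zap.String("user", "alice").AddTo(enc)
        zap.Int64("attempts", 3).AddTo(enc)
        // Fields holds the encoded context, so tests can assert on it directly.
        if enc.Fields["user"] != "alice" || enc.Fields["attempts"] != int64(3) {
            t.Fatalf("unexpected fields: %v", enc.Fields)
        }
    }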
-type sliceArrayEncoder struct { - elems []interface{} -} - -func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { - enc := &sliceArrayEncoder{} - err := v.MarshalLogArray(enc) - s.elems = append(s.elems, enc.elems) - return err -} - -func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { - m := NewMapObjectEncoder() - err := v.MarshalLogObject(m) - s.elems = append(s.elems, m.Fields) - return err -} - -func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { - s.elems = append(s.elems, v) - return nil -} - -func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } -func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go deleted file mode 100644 index 31ed96e1..00000000 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/atomic" -) - -const ( - _numLevels = _maxLevel - _minLevel + 1 - _countersPerLevel = 4096 -) - -type counter struct { - resetAt atomic.Int64 - counter atomic.Uint64 -} - -type counters [_numLevels][_countersPerLevel]counter - -func newCounters() *counters { - return &counters{} -} - -func (cs *counters) get(lvl Level, key string) *counter { - i := lvl - _minLevel - j := fnv32a(key) % _countersPerLevel - return &cs[i][j] -} - -// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc -func fnv32a(s string) uint32 { - const ( - offset32 = 2166136261 - prime32 = 16777619 - ) - hash := uint32(offset32) - for i := 0; i < len(s); i++ { - hash ^= uint32(s[i]) - hash *= prime32 - } - return hash -} - -func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { - tn := t.UnixNano() - resetAfter := c.resetAt.Load() - if resetAfter > tn { - return c.counter.Inc() - } - - c.counter.Store(1) - - newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { - // We raced with another goroutine trying to reset, and it also reset - // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() - } - - return 1 -} - -// SamplingDecision is a decision represented as a bit field made by sampler. -// More decisions may be added in the future. -type SamplingDecision uint32 - -const ( - // LogDropped indicates that the Sampler dropped a log entry. - LogDropped SamplingDecision = 1 << iota - // LogSampled indicates that the Sampler sampled a log entry. - LogSampled -) - -// optionFunc wraps a func so it satisfies the SamplerOption interface. -type optionFunc func(*sampler) - -func (f optionFunc) apply(s *sampler) { - f(s) -} - -// SamplerOption configures a Sampler. -type SamplerOption interface { - apply(*sampler) -} - -// nopSamplingHook is the default hook used by sampler. -func nopSamplingHook(Entry, SamplingDecision) {} - -// SamplerHook registers a function which will be called when Sampler makes a -// decision. -// -// This hook may be used to get visibility into the performance of the sampler. -// For example, use it to track metrics of dropped versus sampled logs. -// -// var dropped atomic.Int64 -// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { -// if dec&zapcore.LogDropped > 0 { -// dropped.Inc() -// } -// }) -func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { - return optionFunc(func(s *sampler) { - s.hook = hook - }) -} - -// NewSamplerWithOptions creates a Core that samples incoming entries, which -// caps the CPU and I/O load of logging while attempting to preserve a -// representative subset of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Sampler can be configured to report sampling decisions with the SamplerHook -// option. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. 
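A usage sketch of the sampler described above, assuming zap's public constructors (none of which change in this diff):

    package main

    import (
        "os"
        "time"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        base := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.Lock(os.Stdout),
            zapcore.InfoLevel,
        )
        // Per one-second tick: log the first 100 entries with a given level
        // and message, then every 10th duplicate, dropping the rest.
        sampled := zapcore.NewSamplerWithOptions(base, time.Second, 100, 10)
        logger := zap.New(sampled)
        defer logger.Sync()
        logger.Info("request handled")
    }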
-func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { - s := &sampler{ - Core: core, - tick: tick, - counts: newCounters(), - first: uint64(first), - thereafter: uint64(thereafter), - hook: nopSamplingHook, - } - for _, opt := range opts { - opt.apply(s) - } - - return s -} - -type sampler struct { - Core - - counts *counters - tick time.Duration - first, thereafter uint64 - hook func(Entry, SamplingDecision) -} - -// NewSampler creates a Core that samples incoming entries, which -// caps the CPU and I/O load of logging while attempting to preserve a -// representative subset of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. -// -// Deprecated: use NewSamplerWithOptions. -func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return NewSamplerWithOptions(core, tick, first, thereafter) -} - -func (s *sampler) With(fields []Field) Core { - return &sampler{ - Core: s.Core.With(fields), - tick: s.tick, - counts: s.counts, - first: s.first, - thereafter: s.thereafter, - hook: s.hook, - } -} - -func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !s.Enabled(ent.Level) { - return ce - } - - if ent.Level >= _minLevel && ent.Level <= _maxLevel { - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce - } - s.hook(ent, LogSampled) - } - return s.Core.Check(ent, ce) -} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go deleted file mode 100644 index 07a32eef..00000000 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type multiCore []Core - -// NewTee creates a Core that duplicates log entries into two or more -// underlying Cores. -// -// Calling it with a single Core returns the input unchanged, and calling -// it with no input returns a no-op Core. 
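A sketch of the tee described above, duplicating entries to a console core and a JSON core; logFile is an assumed, pre-opened *os.File:

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func newTeeLogger(logFile *os.File) *zap.Logger {
        consoleCore := zapcore.NewCore(
            zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
            zapcore.Lock(os.Stderr),
            zapcore.DebugLevel,
        )
        jsonCore := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.AddSync(logFile),
            zapcore.InfoLevel,
        )
        // Each entry is checked against every core's level and written to
        // each core that accepts it.
        return zap.New(zapcore.NewTee(consoleCore, jsonCore))
    }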
-func NewTee(cores ...Core) Core { - switch len(cores) { - case 0: - return NewNopCore() - case 1: - return cores[0] - default: - return multiCore(cores) - } -} - -func (mc multiCore) With(fields []Field) Core { - clone := make(multiCore, len(mc)) - for i := range mc { - clone[i] = mc[i].With(fields) - } - return clone -} - -func (mc multiCore) Enabled(lvl Level) bool { - for i := range mc { - if mc[i].Enabled(lvl) { - return true - } - } - return false -} - -func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - for i := range mc { - ce = mc[i].Check(ent, ce) - } - return ce -} - -func (mc multiCore) Write(ent Entry, fields []Field) error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Write(ent, fields)) - } - return err -} - -func (mc multiCore) Sync() error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Sync()) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go deleted file mode 100644 index d4a1af3d..00000000 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "io" - "sync" - - "go.uber.org/multierr" -) - -// A WriteSyncer is an io.Writer that can also flush any buffered data. Note -// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. -type WriteSyncer interface { - io.Writer - Sync() error -} - -// AddSync converts an io.Writer to a WriteSyncer. It attempts to be -// intelligent: if the concrete type of the io.Writer implements WriteSyncer, -// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. -func AddSync(w io.Writer) WriteSyncer { - switch w := w.(type) { - case WriteSyncer: - return w - default: - return writerWrapper{w} - } -} - -type lockedWriteSyncer struct { - sync.Mutex - ws WriteSyncer -} - -// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In -// particular, *os.Files must be locked before use. 
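A short sketch of AddSync and Lock in combination, under the behavior documented above:

    package main

    import (
        "bytes"
        "os"

        "go.uber.org/zap/zapcore"
    )

    func main() {
        // bytes.Buffer has no Sync method, so AddSync wraps it with a no-op Sync.
        var buf bytes.Buffer
        ws := zapcore.AddSync(&buf)

        // *os.File already satisfies WriteSyncer; Lock adds a mutex so several
        // goroutines (or cores) can share it safely.
        stderr := zapcore.Lock(os.Stderr)

        ws.Write([]byte("buffered\n"))
        stderr.Write([]byte("to stderr\n"))
    }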
-func Lock(ws WriteSyncer) WriteSyncer { - if _, ok := ws.(*lockedWriteSyncer); ok { - // no need to layer on another lock - return ws - } - return &lockedWriteSyncer{ws: ws} -} - -func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { - s.Lock() - n, err := s.ws.Write(bs) - s.Unlock() - return n, err -} - -func (s *lockedWriteSyncer) Sync() error { - s.Lock() - err := s.ws.Sync() - s.Unlock() - return err -} - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -type multiWriteSyncer []WriteSyncer - -// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes -// and sync calls, much like io.MultiWriter. -func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { - if len(ws) == 1 { - return ws[0] - } - return multiWriteSyncer(ws) -} - -// See https://golang.org/src/io/multi.go -// When not all underlying syncers write the same number of bytes, -// the smallest number is returned even though Write() is called on -// all of them. -func (ws multiWriteSyncer) Write(p []byte) (int, error) { - var writeErr error - nWritten := 0 - for _, w := range ws { - n, err := w.Write(p) - writeErr = multierr.Append(writeErr, err) - if nWritten == 0 && n != 0 { - nWritten = n - } else if n < nWritten { - nWritten = n - } - } - return nWritten, writeErr -} - -func (ws multiWriteSyncer) Sync() error { - var err error - for _, w := range ws { - err = multierr.Append(err, w.Sync()) - } - return err -} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000..29f0a2de --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. 
+//
+// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
+// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
+package argon2
+
+import (
+ "encoding/binary"
+ "sync"
+
+ "golang.org/x/crypto/blake2b"
+)
+
+// The Argon2 version implemented by this package.
+const Version = 0x13
+
+const (
+ argon2d = iota
+ argon2i
+ argon2id
+)
+
+// Key derives a key from the password, salt, and cost parameters using Argon2i,
+// returning a byte slice of length keyLen that can be used as a cryptographic
+// key. The CPU cost and parallelism degree must be greater than zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
+// If using that amount of memory (32 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increases. Remember to get a
+// good random salt.
+func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
+ return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
+}
+
+// IDKey derives a key from the password, salt, and cost parameters using
+// Argon2id, returning a byte slice of length keyLen that can be used as a
+// cryptographic key. The CPU cost and parallelism degree must be greater than
+// zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
+// If using that amount of memory (64 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increases. Remember to get a
+// good random salt.
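Tying the pieces together, a minimal password-hashing sketch using the Argon2id parameters recommended above; the salt handling is an assumption, not part of this file:

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/argon2"
    )

    func main() {
        // A fresh random salt per password, per the advice in the docs above.
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
            panic(err)
        }
        // time=1, memory=64 MiB, threads=4, 32-byte key, as recommended for IDKey.
        key := argon2.IDKey([]byte("s3cret"), salt, 1, 64*1024, 4, 32)
        fmt.Printf("%x\n", key)
    }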
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 00000000..10f46948 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 00000000..a014ac92 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000..b2cc0515 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, 
v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) 
+TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 00000000..a481b224 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 
2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000..167c59d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..d2e98d42 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. 
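For orientation, a minimal sketch of the two most common uses described in the doc comment above: one-shot hashing with Sum512, and keyed hashing (a MAC) with New512. The message and key bytes here are illustrative only.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	msg := []byte("hello world") // illustrative message

	// One-shot BLAKE2b-512: returns a fixed [64]byte digest.
	digest := blake2b.Sum512(msg)
	fmt.Printf("%x\n", digest)

	// Keyed BLAKE2b-512: a non-nil key (up to 64 bytes) turns the hash
	// into a MAC, with no separate HMAC construction needed.
	mac, err := blake2b.New512([]byte("0123456789abcdef")) // illustrative key
	if err != nil {
		panic(err)
	}
	mac.Write(msg)
	fmt.Printf("%x\n", mac.Sum(nil))
}
```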
+package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
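Since an unkeyed digest implements BinaryMarshaler and BinaryUnmarshaler, as the doc comment above notes, its state can be checkpointed and resumed later. A minimal sketch; the type assertions succeed because the concrete digest type always carries both methods, and MarshalBinary only errors for keyed (MAC) digests:

```go
package main

import (
	"bytes"
	"encoding"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	h1, _ := blake2b.New256(nil) // unkeyed: marshaling is allowed
	h1.Write([]byte("part one, "))

	// Checkpoint the hash state.
	state, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore into a fresh digest and finish the message.
	h2, _ := blake2b.New256(nil)
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("part two"))

	oneShot := blake2b.Sum256([]byte("part one, part two"))
	fmt.Println(bytes.Equal(h2.Sum(nil), oneShot[:])) // true
}
```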
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..56bfaaa1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..4b9daa18 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,745 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE 
$0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + 
VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + 
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ 
+ VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; 
\ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + 
SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 00000000..5fa1b328 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 && amd64 && gc && !purego +// +build !go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..ae75eb9a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,279 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 
64(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
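As a sketch of that calculation, assuming sigma is the ten-row BLAKE2b message schedule from RFC 7693 extended by repeating its first two rows for rounds 10 and 11: each row of the table that follows is the matching sigma row reordered into the order in which the unrolled round below consumes its message words. The helper here is hypothetical and not part of the package.

```go
package blake2b

// order interleaves the first message words of the four column G calls, then
// their second words, then the same for the four diagonal G calls.
var order = [16]int{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}

// precompute (hypothetical) reorders each sigma row into the order
// hashBlocksGeneric reads message words within one round.
func precompute(sigma [12][16]byte) (p [12][16]byte) {
	for r := range sigma {
		for j, k := range order {
			p[r][j] = sigma[r][k]
		}
	}
	return p
}
```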
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..b0137cdf --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. 
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..9d863396 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000..904b57e0 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 00000000..c971a99f --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. 
+func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. +func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, 
v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 00000000..7caeeaa6 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,191 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). 
+//
+// All of these domains have the same eTLD+1:
+// - "www.books.amazon.co.uk"
+// - "books.amazon.co.uk"
+// - "amazon.co.uk"
+//
+// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk".
+//
+// There is no closed form algorithm to calculate the eTLD of a domain.
+// Instead, the calculation is data driven. This package provides a
+// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at
+// https://publicsuffix.org/
+package publicsuffix // import "golang.org/x/net/publicsuffix"
+
+// TODO: specify case sensitivity and leading/trailing dot behavior for
+// func PublicSuffix and func EffectiveTLDPlusOne.
+
+import (
+	"fmt"
+	"net/http/cookiejar"
+	"strings"
+)
+
+// List implements the cookiejar.PublicSuffixList interface by calling the
+// PublicSuffix function.
+var List cookiejar.PublicSuffixList = list{}
+
+type list struct{}
+
+func (list) PublicSuffix(domain string) string {
+	ps, _ := PublicSuffix(domain)
+	return ps
+}
+
+func (list) String() string {
+	return version
+}
+
+// PublicSuffix returns the public suffix of the domain using a copy of the
+// publicsuffix.org database compiled into the library.
+//
+// icann is whether the public suffix is managed by the Internet Corporation
+// for Assigned Names and Numbers. If not, the public suffix is either a
+// privately managed domain (and in practice, not a top level domain) or an
+// unmanaged top level domain (and not explicitly mentioned in the
+// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
+// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
+// "cromulent" is an unmanaged top level domain.
+//
+// Use cases for distinguishing ICANN domains like "foo.com" from private
+// domains like "foo.appspot.com" can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+	lo, hi := uint32(0), uint32(numTLD)
+	s, suffix, icannNode, wildcard := domain, len(domain), false, false
+loop:
+	for {
+		dot := strings.LastIndex(s, ".")
+		if wildcard {
+			icann = icannNode
+			suffix = 1 + dot
+		}
+		if lo == hi {
+			break
+		}
+		f := find(s[1+dot:], lo, hi)
+		if f == notFound {
+			break
+		}
+
+		u := uint32(nodeValue(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
+		icannNode = u&(1<<nodesBitsICANN-1) != 0
+		u >>= nodesBitsICANN
+		u = children[u&(1<<nodesBitsChildren-1)]
+		lo = u & (1<<childrenBitsLo - 1)
+		u >>= childrenBitsLo
+		hi = u & (1<<childrenBitsHi - 1)
+		u >>= childrenBitsHi
+		switch u & (1<<childrenBitsNodeType - 1) {
+		case nodeTypeNormal:
+			suffix = 1 + dot
+		case nodeTypeException:
+			suffix = 1 + len(s)
+			break loop
+		}
+		u >>= childrenBitsNodeType
+		wildcard = u&(1<<childrenBitsWildcard-1) != 0
+		if !wildcard {
+			icann = icannNode
+		}
+
+		if dot == -1 {
+			break
+		}
+		s = s[:dot]
+	}
+	if suffix == len(domain) {
+		// If no rules match, the prevailing rule is "*".
+		return domain[1+strings.LastIndex(domain, "."):], icann
+	}
+	return domain[suffix:], icann
+}
+
+// EffectiveTLDPlusOne returns the effective top level domain plus one more
+// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
+func EffectiveTLDPlusOne(domain string) (string, error) {
+	if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") {
+		return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain)
+	}
+
+	suffix, _ := PublicSuffix(domain)
+	if len(domain) <= len(suffix) {
+		return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
+	}
+	i := len(domain) - len(suffix) - 1
+	if domain[i] != '.' {
+		return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
+	}
+	return domain[1+strings.LastIndex(domain[:i], "."):], nil
+}
+
+const notFound uint32 = 1<<32 - 1
+
+// find returns the index of the node in the range [lo, hi) whose label equals
+// label, or notFound if there is no such node. The range is assumed to be in
+// strictly increasing node label order.
+func find(label string, lo, hi uint32) uint32 {
+	for lo < hi {
+		mid := lo + (hi-lo)/2
+		s := nodeLabel(mid)
+		if s < label {
+			lo = mid + 1
+		} else if s == label {
+			return mid
+		} else {
+			hi = mid
+		}
+	}
+	return notFound
+}
+
+// nodeLabel returns the label for the i'th node.
+func nodeLabel(i uint32) string {
+	x := nodeValue(i)
+	length := x & (1<<nodesBitsTextLength - 1)
+	x >>= nodesBitsTextLength
+	offset := x & (1<<nodesBitsTextOffset - 1)
+	return text[offset : offset+length]
+}
[... the hunks between the end of vendor/golang.org/x/net/publicsuffix/list.go and the tail of vendor/golang.org/x/sys/cpu/cpu_arm64.go were lost in extraction; only cpu_arm64.go's final helper survives below ...]
+func extractBits(data uint64, start, end uint) uint {
+	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
new file mode 100644
index 00000000..c61f95a0
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
@@ -0,0 +1,32 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
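For reviewers unfamiliar with the publicsuffix package vendored above, its two exported entry points are easy to exercise. A minimal sketch, not part of this diff, assuming only the import path shown in list.go:

    package main

    import (
        "fmt"

        "golang.org/x/net/publicsuffix"
    )

    func main() {
        // "co.uk" is an ICANN suffix; "blogspot.co.uk" comes from the
        // privately managed section of the PSL, so icann reports false.
        for _, d := range []string{"books.amazon.co.uk", "foo.blogspot.co.uk"} {
            ps, icann := publicsuffix.PublicSuffix(d)
            etld1, _ := publicsuffix.EffectiveTLDPlusOne(d)
            fmt.Printf("%s: suffix=%s icann=%v eTLD+1=%s\n", d, ps, icann, etld1)
        }
    }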
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+// func getisar0() uint64
+TEXT ·getisar0(SB),NOSPLIT,$0-8
+	// get Instruction Set Attributes 0 into x0
+	// mrs x0, ID_AA64ISAR0_EL1 = d5380600
+	WORD	$0xd5380600
+	MOVD	R0, ret+0(FP)
+	RET
+
+// func getisar1() uint64
+TEXT ·getisar1(SB),NOSPLIT,$0-8
+	// get Instruction Set Attributes 1 into x0
+	// mrs x0, ID_AA64ISAR1_EL1 = d5380620
+	WORD	$0xd5380620
+	MOVD	R0, ret+0(FP)
+	RET
+
+// func getpfr0() uint64
+TEXT ·getpfr0(SB),NOSPLIT,$0-8
+	// get Processor Feature Register 0 into x0
+	// mrs x0, ID_AA64PFR0_EL1 = d5380400
+	WORD	$0xd5380400
+	MOVD	R0, ret+0(FP)
+	RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
new file mode 100644
index 00000000..ccf542a7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+package cpu
+
+func getisar0() uint64
+func getisar1() uint64
+func getpfr0() uint64
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
new file mode 100644
index 00000000..0af2f248
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return true }
+
+// The following feature detection functions are defined in cpu_s390x.s.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
new file mode 100644
index 00000000..fa7cdb9b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (386 || amd64 || amd64p32) && gc
+// +build 386 amd64 amd64p32
+// +build gc
+
+package cpu
+
+// cpuid is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
new file mode 100644
index 00000000..2aff3189
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
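The assembly above reads the ID_AA64ISAR0/ISAR1/PFR0 registers so the package can populate its exported feature flags; downstream code then gates optimized paths on those flags. A short usage sketch, assuming only the public cpu API:

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // On linux/arm64 these flags normally come from HWCAP; the MRS
        // reads above are one of the fallbacks.
        if cpu.ARM64.HasAES && cpu.ARM64.HasPMULL {
            fmt.Println("using hardware AES-GCM")
        } else {
            fmt.Println("using software AES fallback")
        }
    }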
+
+//go:build gccgo
+// +build gccgo
+
+package cpu
+
+func getisar0() uint64 { return 0 }
+func getisar1() uint64 { return 0 }
+func getpfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 00000000..4bfbda61
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+// +build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList { panic("not implemented for gccgo") }
+func kmQuery() queryResult { panic("not implemented for gccgo") }
+func kmcQuery() queryResult { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult { panic("not implemented for gccgo") }
+func kimdQuery() queryResult { panic("not implemented for gccgo") }
+func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
new file mode 100644
index 00000000..a4605e6d
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -0,0 +1,38 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+#include <x86intrin.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC push_options
+#pragma GCC target("xsave")
+#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+	uint64_t v = _xgetbv(0);
+	*eax = v & 0xffffffff;
+	*edx = v >> 32;
+}
+
+#pragma clang attribute pop
+#pragma GCC pop_options
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
new file mode 100644
index 00000000..863d415a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
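The C shim above wraps __get_cpuid_count for gccgo; the gc toolchain provides the same unexported cpuid wrapper in assembly. To illustrate the register layout it returns: leaf 0 reports the maximum supported leaf in EAX and a 12-byte vendor string split across EBX, EDX, ECX. A hypothetical in-package helper (not part of the vendored file; it would have to live inside package cpu because cpuid is unexported):

    package cpu

    import "encoding/binary"

    // vendorID is illustrative only.
    func vendorID() string {
        _, ebx, ecx, edx := cpuid(0, 0)
        b := make([]byte, 0, 12)
        b = binary.LittleEndian.AppendUint32(b, ebx)
        b = binary.LittleEndian.AppendUint32(b, edx) // note: EDX before ECX
        b = binary.LittleEndian.AppendUint32(b, ecx)
        return string(b) // e.g. "GenuineIntel" or "AuthenticAMD"
    }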
+ +//go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 00000000..159a686f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 +// +build !386,!amd64,!amd64p32,!arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 00000000..2057006d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 00000000..79a38a0b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,71 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + if err := readHWCAP(); err != nil { + // failed to read /proc/self/auxv, try reading registers directly + readARM64Registers() + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 00000000..6000db4c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) +// +build linux +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 00000000..f4992b1a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
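The arm64 doinit above is a straight bit test against AT_HWCAP, and the encoding is fixed by the Linux ABI, so it can be illustrated standalone. The constants below mirror hwcap_AES, hwcap_SHA2, and hwcap_CRC32 from the vendored file, applied to a hypothetical HWCAP value:

    package main

    import "fmt"

    const (
        hwcapAES   = 1 << 3
        hwcapSHA2  = 1 << 6
        hwcapCRC32 = 1 << 7
    )

    func main() {
        // A hypothetical AT_HWCAP value, as readHWCAP would return it.
        hwcap := uint(hwcapAES | hwcapCRC32)
        fmt.Println("aes:", hwcap&hwcapAES != 0)     // true
        fmt.Println("sha2:", hwcap&hwcapSHA2 != 0)   // false
        fmt.Println("crc32:", hwcap&hwcapCRC32 != 0) // true
    }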
+ +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 00000000..021356d6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 00000000..1517ac61 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 00000000..0f57b05b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 +// +build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 00000000..f4063c66 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
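One subtlety in initS390Xbase above: it tests hwCap&featureMask == featureMask rather than != 0. For single-bit masks the two are equivalent, but only the former generalizes to multi-bit masks. A small illustration with made-up mask values:

    package main

    import "fmt"

    func main() {
        const maskA, maskB = uint(1 << 2), uint(1 << 5)
        combined := maskA | maskB
        hwcap := maskA // only feature A present

        fmt.Println(hwcap&combined != 0)        // true: some bit is set
        fmt.Println(hwcap&combined == combined) // false: not all bits are set
    }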
+ +//go:build mips64 || mips64le +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 00000000..07c4e36d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 00000000..ebfb3fc8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+	for partno, part := range parts {
+		nodes, err := sysctlNodes(mib)
+		if err != nil {
+			return nil, err
+		}
+		for _, node := range nodes {
+			n := make([]byte, 0)
+			for i := range node.Name {
+				if node.Name[i] != 0 {
+					n = append(n, byte(node.Name[i]))
+				}
+			}
+			if string(n) == part {
+				mib = append(mib, int32(node.Num))
+				break
+			}
+		}
+		if len(mib) != partno+1 {
+			return nil, err
+		}
+	}
+
+	return mib, nil
+}
+
+// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
+type aarch64SysctlCPUID struct {
+	midr      uint64 /* Main ID Register */
+	revidr    uint64 /* Revision ID Register */
+	mpidr     uint64 /* Multiprocessor Affinity Register */
+	aa64dfr0  uint64 /* A64 Debug Feature Register 0 */
+	aa64dfr1  uint64 /* A64 Debug Feature Register 1 */
+	aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
+	aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
+	aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
+	aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
+	aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
+	aa64pfr0  uint64 /* A64 Processor Feature Register 0 */
+	aa64pfr1  uint64 /* A64 Processor Feature Register 1 */
+	aa64zfr0  uint64 /* A64 SVE Feature ID Register 0 */
+	mvfr0     uint32 /* Media and VFP Feature Register 0 */
+	mvfr1     uint32 /* Media and VFP Feature Register 1 */
+	mvfr2     uint32 /* Media and VFP Feature Register 2 */
+	pad       uint32
+	clidr     uint64 /* Cache Level ID Register */
+	ctr       uint64 /* Cache Type Register */
+}
+
+func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
+	mib, err := nametomib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := aarch64SysctlCPUID{}
+	n := unsafe.Sizeof(out)
+	_, _, errno := syscall.Syscall6(
+		syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&out)),
+		uintptr(unsafe.Pointer(&n)),
+		uintptr(0),
+		uintptr(0))
+	if errno != 0 {
+		return nil, errno
+	}
+	return &out, nil
+}
+
+func doinit() {
+	cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
+	if err != nil {
+		setMinimalFeatures()
+		return
+	}
+	parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
+
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 00000000..85b64d5c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+	// From OpenBSD's sys/sysctl.h.
+	_CTL_MACHDEP = 7
+
+	// From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 00000000..054ba05d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 00000000..d7b4fb4c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm +// +build !linux,arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 00000000..f3cde129 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 +// +build !linux,!netbsd,!openbsd,arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 00000000..0dafe964 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
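Both BSD ports above end by handing raw ID_AA64ISAR0/ISAR1 values to parseARM64SystemRegisters, which slices them into 4-bit feature fields. A worked example of that decoding, with extractBits mirroring the helper from cpu_arm64.go and a hypothetical register value:

    package main

    import "fmt"

    // extractBits mirrors the helper in cpu_arm64.go: it pulls the
    // inclusive bit range [start, end] out of an ID register value.
    func extractBits(data uint64, start, end uint) uint {
        return uint(data>>start) & ((1 << (end - start + 1)) - 1)
    }

    func main() {
        // Hypothetical ID_AA64ISAR0_EL1: AES field (bits 4..7) = 2, which
        // means both the AES and PMULL instructions are implemented.
        var isar0 uint64 = 0x2 << 4
        aes := extractBits(isar0, 4, 7)
        fmt.Println("AES:", aes >= 1, "PMULL:", aes >= 2)
    }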
+ +//go:build !linux && (mips64 || mips64le) +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 00000000..060d46b6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) +// +build !aix +// +build !linux +// +build ppc64 ppc64le + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 00000000..dd10eb79 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 +// +build !linux,riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 00000000..4e8acd16 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 00000000..bd6c128a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 +// +build riscv64 + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 00000000..5881b883 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
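The s390x support that starts below numbers facility bits from the most significant bit (MSB-0), matching how STFLE stores its result: facility 0 is the leftmost bit of the first doubleword. A self-contained illustration of the bitIsSet helper defined below:

    package main

    import "fmt"

    // bitIsSet mirrors the helper in cpu_s390x.go: bit 0 is the leftmost
    // (most significant) bit of bits[0].
    func bitIsSet(bits []uint64, index uint) bool {
        return bits[index/64]&((1<<63)>>(index%64)) != 0
    }

    func main() {
        var list [4]uint64
        list[0] = (1 << 63) >> 18 // facility 18: long-displacement
        fmt.Println(bitIsSet(list[:], 18)) // true
        fmt.Println(bitIsSet(list[:], 19)) // false
    }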
+ +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. 
+func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 00000000..96f81e20 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) 
+	RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
new file mode 100644
index 00000000..7747d888
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasm
+// +build wasm
+
+package cpu
+
+// We're compiling the cpu package for an unknown (software-abstracted) CPU.
+// Make CacheLinePad an empty struct and hope that the usual struct alignment
+// rules are good enough.
+
+const cacheLineSize = 0
+
+func initOptions() {}
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
new file mode 100644
index 00000000..f5aacfc8
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -0,0 +1,145 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || amd64p32
+// +build 386 amd64 amd64p32
+
+package cpu
+
+import "runtime"
+
+const cacheLineSize = 64
+
+func initOptions() {
+	options = []option{
+		{Name: "adx", Feature: &X86.HasADX},
+		{Name: "aes", Feature: &X86.HasAES},
+		{Name: "avx", Feature: &X86.HasAVX},
+		{Name: "avx2", Feature: &X86.HasAVX2},
+		{Name: "avx512", Feature: &X86.HasAVX512},
+		{Name: "avx512f", Feature: &X86.HasAVX512F},
+		{Name: "avx512cd", Feature: &X86.HasAVX512CD},
+		{Name: "avx512er", Feature: &X86.HasAVX512ER},
+		{Name: "avx512pf", Feature: &X86.HasAVX512PF},
+		{Name: "avx512vl", Feature: &X86.HasAVX512VL},
+		{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+		{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
+		{Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
+		{Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
+		{Name: "avx5124vnniw", Feature: &X86.HasAVX5124VNNIW},
+		{Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
+		{Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
+		{Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
+		{Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
+		{Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
+		{Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
+		{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
+		{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
+		{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
+		{Name: "bmi1", Feature: &X86.HasBMI1},
+		{Name: "bmi2", Feature: &X86.HasBMI2},
+		{Name: "cx16", Feature: &X86.HasCX16},
+		{Name: "erms", Feature: &X86.HasERMS},
+		{Name: "fma", Feature: &X86.HasFMA},
+		{Name: "osxsave", Feature: &X86.HasOSXSAVE},
+		{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+		{Name: "popcnt", Feature: &X86.HasPOPCNT},
+		{Name: "rdrand", Feature: &X86.HasRDRAND},
+		{Name: "rdseed", Feature: &X86.HasRDSEED},
+		{Name: "sse3", Feature: &X86.HasSSE3},
+		{Name: "sse41", Feature: &X86.HasSSE41},
+		{Name: "sse42", Feature: &X86.HasSSE42},
+		{Name: "ssse3", Feature: &X86.HasSSSE3},
+
+		// These capabilities should always be enabled on amd64:
+		{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
+	}
+}
+
+func archInit() {
+
+	Initialized = true
+
+	maxID, _, _, _ := cpuid(0, 0)
+
+	if maxID < 1 {
+		return
+	}
+
+	_, _, ecx1, edx1 := cpuid(1, 0)
+	X86.HasSSE2 = isSet(26, edx1)
+
+	X86.HasSSE3 = isSet(0, ecx1)
+	X86.HasPCLMULQDQ = isSet(1, ecx1)
+	X86.HasSSSE3 = isSet(9, ecx1)
+	X86.HasFMA = isSet(12, ecx1)
+	X86.HasCX16 = isSet(13, ecx1)
+	X86.HasSSE41 = isSet(19, ecx1)
+	X86.HasSSE42 = isSet(20, ecx1)
+	X86.HasPOPCNT = isSet(23, ecx1)
+	X86.HasAES = isSet(25, ecx1)
+	X86.HasOSXSAVE = isSet(27, ecx1)
+	X86.HasRDRAND = isSet(30, ecx1)
+
+	var osSupportsAVX, osSupportsAVX512 bool
+	// For XGETBV, OSXSAVE bit is required and sufficient.
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+
+		if runtime.GOOS == "darwin" {
+			// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
+			// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
+			// See issue 49233.
+			osSupportsAVX512 = false
+		} else {
+			// Check if OPMASK and ZMM registers have OS support.
+			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+		}
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, ecx7, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+	if X86.HasAVX512 {
+		X86.HasAVX512F = true
+		X86.HasAVX512CD = isSet(28, ebx7)
+		X86.HasAVX512ER = isSet(27, ebx7)
+		X86.HasAVX512PF = isSet(26, ebx7)
+		X86.HasAVX512VL = isSet(31, ebx7)
+		X86.HasAVX512BW = isSet(30, ebx7)
+		X86.HasAVX512DQ = isSet(17, ebx7)
+		X86.HasAVX512IFMA = isSet(21, ebx7)
+		X86.HasAVX512VBMI = isSet(1, ecx7)
+		X86.HasAVX5124VNNIW = isSet(2, edx7)
+		X86.HasAVX5124FMAPS = isSet(3, edx7)
+		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+		X86.HasAVX512VNNI = isSet(11, ecx7)
+		X86.HasAVX512GFNI = isSet(8, ecx7)
+		X86.HasAVX512VAES = isSet(9, ecx7)
+		X86.HasAVX512VBMI2 = isSet(6, ecx7)
+		X86.HasAVX512BITALG = isSet(12, ecx7)
+
+		eax71, _, _, _ := cpuid(7, 1)
+		X86.HasAVX512BF16 = isSet(5, eax71)
+	}
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
[... the hunks between the end of vendor/golang.org/x/sys/cpu/cpu_x86.go and vendor/golang.org/x/sys/cpu/hwcap_linux.go were lost in extraction; the text resumes inside hwcap_linux.go ...]
diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"io/ioutil"
+)
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+	buf, err := ioutil.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on android /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false. On some
+		// architectures (e.g. arm64) doinit() implements a fallback
+		// readout and will set Initialized = true again.
+		return err
+	}
+	bo := hostByteOrder()
+	for len(buf) >= 2*(uintSize/8) {
+		var tag, val uint
+		switch uintSize {
+		case 32:
+			tag = uint(bo.Uint32(buf[0:]))
+			val = uint(bo.Uint32(buf[4:]))
+			buf = buf[8:]
+		case 64:
+			tag = uint(bo.Uint64(buf[0:]))
+			val = uint(bo.Uint64(buf[8:]))
+			buf = buf[16:]
+		}
+		switch tag {
+		case _AT_HWCAP:
+			hwCap = val
+		case _AT_HWCAP2:
+			hwCap2 = val
+		}
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
new file mode 100644
index 00000000..96134157
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
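readHWCAP above parses /proc/self/auxv, which the kernel exposes as a flat array of (tag, value) pairs in native byte order. A stripped-down sketch of the same parse, assuming a 64-bit little-endian Linux host:

    package main

    import (
        "encoding/binary"
        "fmt"
        "os"
    )

    const (
        atHWCAP  = 16
        atHWCAP2 = 26
    )

    func main() {
        buf, err := os.ReadFile("/proc/self/auxv")
        if err != nil {
            fmt.Println("auxv not readable:", err) // e.g. on Android
            return
        }
        for len(buf) >= 16 {
            tag := binary.LittleEndian.Uint64(buf[0:])
            val := binary.LittleEndian.Uint64(buf[8:])
            buf = buf[16:]
            if tag == atHWCAP || tag == atHWCAP2 {
                fmt.Printf("tag %d: %#x\n", tag, val)
            }
        }
    }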
+ +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo +// +build aix,gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 00000000..904be42f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/gopkg.in/ini.v1/.editorconfig new file mode 100644 index 00000000..4a2d9180 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore new file mode 100644 index 00000000..588388bd --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 00000000..631e3692 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE new file mode 100644 index 00000000..d361bbcd --- /dev/null +++ b/vendor/gopkg.in/ini.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
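The hunks that follow vendor the gopkg.in/ini.v1 package itself. For orientation, typical consuming code loads a file and reads a few keys roughly as below; this is an illustrative sketch only, and the file name, section, and key names are invented, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Load accepts file paths, []byte, io.Reader, or io.ReadCloser sources.
	cfg, err := ini.Load("my.ini")
	if err != nil {
		log.Fatalf("failed to read file: %v", err)
	}

	// Keys are addressed per section; Must* helpers fall back to a default
	// value when the key is absent or unparsable.
	host := cfg.Section("server").Key("host").MustString("127.0.0.1")
	port := cfg.Section("server").Key("port").MustInt(8080)
	fmt.Printf("%s:%d\n", host, port)
}
```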
diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile
new file mode 100644
index 00000000..f3b0dae2
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+	go test -v -cover -race
+
+bench:
+	go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+	go vet
+
+coverage:
+	go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md
new file mode 100644
index 00000000..30606d97
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/README.md
@@ -0,0 +1,43 @@
+# INI
+
+[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain)
+[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
+[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.13**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add the `-u` flag to update it in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- Mainland China mirror: https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml
new file mode 100644
index 00000000..e02ec84b
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/codecov.yml
@@ -0,0 +1,16 @@
+coverage:
+  range: "60...95"
+  status:
+    project:
+      default:
+        threshold: 1%
+        informational: true
+    patch:
+      default:
+        only_pulls: true
+        informational: true
+
+comment:
+  layout: 'diff'
+
+github_checks: false
diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go
new file mode 100644
index 00000000..c3a541f1
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/data_source.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+var (
+	_ dataSource = (*sourceFile)(nil)
+	_ dataSource = (*sourceData)(nil)
+	_ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+	reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+	return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	case io.ReadCloser:
+		return &sourceReadCloser{s}, nil
+	case io.Reader:
+		return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+	}
+}
diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go
new file mode 100644
index 00000000..48b8e66d
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/deprecated.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+var (
+	// Deprecated: Use "DefaultSection" instead.
+	DEFAULT_SECTION = DefaultSection
+	// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore = SnackCase
+)
diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 00000000..f66bc94b
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,49 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+)
+
+// ErrDelimiterNotFound indicates that no key-value delimiter was found on a
+// line where one was expected.
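+//
+// For illustration only (not part of the upstream file), a caller would
+// typically detect this error as:
+//
+//	if _, err := ini.Load("broken.ini"); err != nil && ini.IsErrDelimiterNotFound(err) {
+//		// a line was missing its "=" or ":" delimiter
+//	}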
+type ErrDelimiterNotFound struct {
+	Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+	_, ok := err.(ErrDelimiterNotFound)
+	return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
+
+// ErrEmptyKeyName indicates that a line contains a key-value delimiter but no
+// key name where one was expected.
+type ErrEmptyKeyName struct {
+	Line string
+}
+
+// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName.
+func IsErrEmptyKeyName(err error) bool {
+	_, ok := err.(ErrEmptyKeyName)
+	return ok
+}
+
+func (err ErrEmptyKeyName) Error() string {
+	return fmt.Sprintf("empty key name: %s", err.Line)
+}
diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go
new file mode 100644
index 00000000..f8b22408
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/file.go
@@ -0,0 +1,541 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	options     LoadOptions
+	dataSources []dataSource
+
+	// Should make things safe, but sometimes doesn't matter.
+	BlockMode bool
+	lock      sync.RWMutex
+
+	// To keep data in order.
+	sectionList []string
+	// To keep track of the index of a section with same name.
+	// This meta list is only used when non-unique section names are allowed.
+	sectionIndexes []int
+
+	// Actual data is stored here.
+	sections map[string][]*Section
+
+	NameMapper
+	ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+	if len(opts.KeyValueDelimiters) == 0 {
+		opts.KeyValueDelimiters = "=:"
+	}
+	if len(opts.KeyValueDelimiterOnWrite) == 0 {
+		opts.KeyValueDelimiterOnWrite = "="
+	}
+	if len(opts.ChildSectionDelimiter) == 0 {
+		opts.ChildSectionDelimiter = "."
+	}
+
+	return &File{
+		BlockMode:   true,
+		dataSources: dataSources,
+		sections:    make(map[string][]*Section),
+		options:     opts,
+	}
+}
+
+// Empty returns an empty file object.
+func Empty(opts ...LoadOptions) *File {
+	var opt LoadOptions
+	if len(opts) > 0 {
+		opt = opts[0]
+	}
+
+	// Ignore error here, we are sure our data is good.
+	f, _ := LoadSources(opt, []byte(""))
+	return f
+}
+
+// NewSection creates a new section.
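+//
+// For illustration only (not part of the upstream file):
+//
+//	cfg := ini.Empty()
+//	sec, err := cfg.NewSection("server")
+//	if err == nil {
+//		sec.Comment = "core server settings"
+//	}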
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. 
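+//
+// For illustration only (not part of the upstream file; cfg is a loaded *File):
+//
+//	for _, name := range cfg.SectionStrings() {
+//		fmt.Println(name, len(cfg.Section(name).Keys()))
+//	}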
+func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. +func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + 
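+			// Each non-empty value (the key's own and any shadows) is written
+			// out below as its own key line; when there are no values at all,
+			// the key is written once with an empty value.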
if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/gopkg.in/ini.v1/helper.go b/vendor/gopkg.in/ini.v1/helper.go new file mode 100644 index 00000000..f9d80a68 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go new file mode 100644 index 00000000..99e7f865 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. 
+package ini
+
+import (
+	"os"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+const (
+	// Maximum allowed depth when recursively substituting variable names.
+	depthValues = 99
+)
+
+var (
+	// DefaultSection is the name of default section. You can use this var or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+	DefaultSection = "DEFAULT"
+
+	// LineBreak is the delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows at package init time.
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+	// DefaultHeader explicitly writes default section header.
+	DefaultHeader = false
+
+	// PrettySection indicates whether to put a line between sections.
+	PrettySection = true
+	// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+	// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+	PrettyEqual = false
+	// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatLeft = ""
+	// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatRight = ""
+)
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+	if runtime.GOOS == "windows" && !inTest {
+		LineBreak = "\r\n"
+	}
+}
+
+// LoadOptions contains all customized options used for load data source(s).
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// InsensitiveSections indicates whether the parser forces all section names to lowercase.
+	InsensitiveSections bool
+	// InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+	InsensitiveKeys bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// ShortCircuit indicates whether to ignore other configuration sources once the first available configuration source has been loaded.
+	ShortCircuit bool
+	// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat their value as missing.
+	// Such keys are mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with same name under same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates whether to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes, but that may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+	// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+	KeyValueDelimiters string
+	// KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value on write. By default, it is "=".
+	KeyValueDelimiterOnWrite string
+	// ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+	ChildSectionDelimiter string
+	// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+	PreserveSurroundedQuote bool
+	// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+	DebugFunc DebugFunc
+	// ReaderBufferSize is the buffer size of the reader in bytes.
+	ReaderBufferSize int
+	// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+	AllowNonUniqueSections bool
+	// AllowDuplicateShadowValues indicates whether to allow duplicate values for shadowed keys;
+	// when false, duplicate shadow values are deduplicated.
+	AllowDuplicateShadowValues bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows the caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data in []byte.
+// It returns an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as Load,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as Load,
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as Load,
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 00000000..a19d9f38
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,837 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	Comment         string
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	isShadow bool
+	shadows  []*Key
+
+	nestedValues []string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	if !k.s.f.options.AllowDuplicateShadowValues {
+		// Deduplicate shadows based on their values.
+		if k.value == val {
+			return nil
+		}
+		for i := range k.shadows {
+			if k.shadows[i].value == val {
+				return nil
+			}
+		}
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+	if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add nested value to auto-increment or boolean key")
+	}
+
+	k.nestedValues = append(k.nestedValues, val)
+	return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+	if !k.s.f.options.AllowNestedValues {
+		return errors.New("nested value is not allowed")
+	}
+	return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any. Shadow
+// keys with empty values are omitted from the returned list.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		if k.value == "" {
+			return []string{}
+		}
+		return []string{k.value}
+	}
+
+	vals := make([]string, 0, len(k.shadows)+1)
+	if k.value != "" {
+		vals = append(vals, k.value)
+	}
+	for _, s := range k.shadows {
+		if s.value != "" {
+			vals = append(vals, s.value)
+		}
+	}
+	return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// The returned value may be nil if no nested values are stored in the key.
+func (k *Key) NestedValues() []string {
+	return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms it to its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail-fast if no indicator char found for recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < depthValues; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := vr[2 : len(vr)-2]
+
+		// Search in the same section.
+		// If not found or found the key itself, then search again in default section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil || k == nk {
+			nk, _ = k.s.f.Section("").GetKey(noption)
+			if nk == nil {
+				// Stop when no results found in the default section,
+				// and returns the value as-is.
+				break
+			}
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	v, err := strconv.ParseInt(k.String(), 0, 64)
+	return int(v), err
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 0, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
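+//
+// For illustration only (not part of the upstream file; cfg is a loaded *File):
+//
+//	t, err := cfg.Section("meta").Key("updated").TimeFormat(time.RFC1123)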
+func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
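+//
+// For illustration only (not part of the upstream file):
+//
+//	mode := cfg.Section("").Key("mode").In("dev", []string{"dev", "test", "prod"})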
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
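+//
+// For illustration only (not part of the upstream file):
+//
+//	minT, _ := time.Parse(time.RFC3339, "2020-01-01T00:00:00Z")
+//	maxT, _ := time.Parse(time.RFC3339, "2030-01-01T00:00:00Z")
+//	deadline := cfg.Section("").Key("deadline").RangeTimeFormat(time.RFC3339, time.Now(), minT, maxT)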
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Bools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
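+//
+// For illustration only (not part of the upstream file), given
+// "runs = 2021-01-01T00:00:00Z,2021-02-01T00:00:00Z":
+//
+//	ts := cfg.Section("").Key("runs").TimesFormat(time.RFC3339, ",")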
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+	return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+	return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included in the result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included in the result list.
+func (k *Key) ValidInts(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included in the result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included in the result list.
+func (k *Key) ValidUints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included in the result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidBools returns list of bool divided by given delimiter. If some value is not a valid bool,
+// then it will not be included in the result list.
+func (k *Key) ValidBools(delim string) []bool {
+	vals, _ := k.parseBools(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+	return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+	return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+	return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+	return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
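+//
+// For illustration only (not part of the upstream file):
+//
+//	ports, err := cfg.Section("").Key("ports").StrictUints(",")
+//	if err != nil {
+//		log.Fatalf("invalid ports list: %v", err)
+//	}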
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. +func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. 
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go new file mode 100644 index 00000000..44fc526c --- /dev/null +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -0,0 +1,520 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
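Before the parser internals begin: the Key accessors above come in three families, all funneling through doParse. The plain forms (Ints, Bools, and friends) coerce unparsable entries to zero values, the Valid* forms drop them, and the Strict* forms return the first error. A minimal sketch of the difference against the vendored gopkg.in/ini.v1 entry points; the [numbers] section and its values are invented, and LoadSources/LoadOptions are assumed standard ini.v1 API:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Hypothetical input; "x" is deliberately not an integer, and the key
	// repeats so the shadow-aware accessor has something extra to return.
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true},
		[]byte("[numbers]\nvals = 1, x, 3\nvals = 4\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("numbers").Key("vals")

	fmt.Println(key.Ints(","))      // lenient: invalid entries parse to zero -> [1 0 3]
	fmt.Println(key.ValidInts(",")) // Valid*: invalid entries are dropped    -> [1 3]

	vals, err := key.StrictInts(",") // Strict*: error on the first bad entry
	fmt.Println(vals, err)

	fmt.Println(key.StringsWithShadows(",")) // shadows appended -> [1 x 3 4]
}
```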
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+	var keyQuote string
+	if line[0] == '"' {
+		if len(line) > 6 && line[0:3] == `"""` {
+			keyQuote = `"""`
+		} else {
+			keyQuote = `"`
+		}
+	} else if line[0] == '`' {
+		keyQuote = "`"
+	}
+
+	// Extract the key name
+	var endIdx int
+	if len(keyQuote) > 0 {
+		startIdx := len(keyQuote)
+		// FIXME: fail case -> """"""name"""=value
+		pos := strings.Index(line[startIdx:], keyQuote)
+		if pos == -1 {
+			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+		}
+		pos += startIdx
+
+		// Find key-value delimiter
+		i := strings.IndexAny(line[pos+startIdx:], delimiters)
+		if i < 0 {
+			return "", -1, ErrDelimiterNotFound{line}
+		}
+		endIdx = pos + i
+		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+	}
+
+	endIdx = strings.IndexAny(line, delimiters)
+	if endIdx < 0 {
+		return "", -1, ErrDelimiterNotFound{line}
+	}
+	if endIdx == 0 {
+		return "", -1, ErrEmptyKeyName{line}
+	}
+
+	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := string(data)
+
+		pos := strings.LastIndex(next, valQuote)
+		if pos > -1 {
+			val += next[:pos]
+
+			comment, has := cleanComment([]byte(next[pos:]))
+			if has {
+				p.comment.Write(bytes.TrimSpace(comment))
+			}
+			break
+		}
+		val += next
+		if p.isEOF {
+			return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
+		}
+	}
+	return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := strings.TrimSpace(string(data))
+
+		if len(next) == 0 {
+			break
+		}
+		val += next
+		if val[len(val)-1] != '\\' {
+			break
+		}
+		val = val[:len(val)-1]
+	}
+	return val, nil
+}
+
+// hasSurroundedQuote reports whether the first and last characters of in
+// are the given quote character. It returns false if any other part of the
+// string contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. 
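readValue above is where quoting and inline-comment stripping interact: back-quoted and triple-quoted values are taken verbatim, surrounding single or double quotes are trimmed, and unquoted values lose everything after a # or ;. A small sketch of that behavior; the keys and values are hypothetical:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Three hypothetical keys exercising the three readValue paths.
	src := []byte("plain = value ; trailing comment\n" +
		"quoted = \"surrounding quotes dropped\"\n" +
		"raw = `kept ; as-is`\n")

	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")
	fmt.Printf("%q\n", sec.Key("plain").String())  // "value"
	fmt.Printf("%q\n", sec.Key("quoted").String()) // "surrounding quotes dropped"
	fmt.Printf("%q\n", sec.Key("raw").String())    // "kept ; as-is"
}
```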
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. + currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. 
+ p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go new file mode 100644 index 00000000..a3615d82 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
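parse above threads most of the load options through its line loop: boolean keys, nested values, unparseable sections, and Python-style multiline values. A hedged sketch of two of them through the public loader; the [flags] content is made up, and LoadSources/MustBool are assumed standard ini.v1 API:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Hypothetical input: a bare boolean key plus a Python-style continuation line.
	src := []byte("[flags]\nskip-verify\nnotes = first line\n    second line\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowBooleanKeys:           true, // bare lines become keys via NewBooleanKey
		AllowPythonMultilineValues: true, // indented lines extend the previous value
	}, src)
	if err != nil {
		panic(err)
	}

	sec := cfg.Section("flags")
	fmt.Println(sec.Key("skip-verify").MustBool(false)) // true
	fmt.Printf("%q\n", sec.Key("notes").String())       // "first line\n    second line"
}
```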
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) + } + } + return children +} diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go new file mode 100644 index 00000000..a486b2fe --- /dev/null +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
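SnackCase and TitleUnderscore above are the two built-in NameMapper implementations, and parseFieldName applies the file's mapper whenever a field carries no explicit ini tag name. A sketch with a hypothetical struct and key:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Hypothetical struct with no per-field ini tags, so the mapper picks key names.
type limits struct {
	MaxConnections int
}

func main() {
	fmt.Println(ini.TitleUnderscore("MaxConnections")) // max_connections
	fmt.Println(ini.SnackCase("MaxConnections"))       // MAX_CONNECTIONS

	var l limits
	err := ini.MapToWithMapper(&l, ini.TitleUnderscore, []byte("max_connections = 64\n"))
	fmt.Println(l.MaxConnections, err) // 64 <nil>
}
```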
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. + // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. 
+func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
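Tying the MapTo entry points above together: a sketch of mapping a section onto a tagged struct. The struct, section, and values are hypothetical; the delim tag feeds setSliceWithProperType, and StrictMapTo surfaces the parse error that plain MapTo (via wrapStrictError) swallows:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Hypothetical target struct; the delim tag is read by parseDelim above.
type server struct {
	Host  string `ini:"host"`
	Ports []int  `ini:"ports" delim:"|"`
	Debug bool   `ini:"debug"`
}

func main() {
	src := []byte("[server]\nhost = example.com\nports = 8080|8443\ndebug = maybe\n")
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	var s server
	// Non-strict: the unparsable "maybe" is swallowed and Debug keeps its zero value.
	fmt.Println(cfg.Section("server").MapTo(&s), s.Host, s.Ports, s.Debug)

	// Strict: the same bad bool becomes an error instead.
	fmt.Println(cfg.Section("server").StrictMapTo(&s))
}
```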
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a29462e9..c8bac569 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -77,6 +77,9 @@ github.com/docker/distribution/reference # github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 ## explicit github.com/docker/libtrust +# github.com/dustin/go-humanize v1.0.0 +## explicit +github.com/dustin/go-humanize # github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 ## explicit github.com/dyatlov/go-opengraph/opengraph @@ -84,13 +87,13 @@ github.com/dyatlov/go-opengraph/opengraph ## explicit github.com/emicklei/go-restful github.com/emicklei/go-restful/log -# github.com/evanphx/json-patch v4.12.0+incompatible +# github.com/evanphx/json-patch v5.6.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/francoispqt/gojay v1.2.13 ## explicit; go 1.12 github.com/francoispqt/gojay -# github.com/go-logr/logr v1.2.2 +# github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr # github.com/go-openapi/jsonpointer v0.19.5 @@ -142,12 +145,23 @@ github.com/gorilla/mux # github.com/gorilla/websocket v1.5.0 ## explicit; go 1.12 github.com/gorilla/websocket -# github.com/hashicorp/errwrap v1.1.0 -## explicit -github.com/hashicorp/errwrap -# github.com/hashicorp/go-multierror v1.1.1 +# github.com/graph-gophers/graphql-go v1.3.0 ## explicit; go 1.13 -github.com/hashicorp/go-multierror +github.com/graph-gophers/graphql-go +github.com/graph-gophers/graphql-go/decode +github.com/graph-gophers/graphql-go/errors +github.com/graph-gophers/graphql-go/internal/common +github.com/graph-gophers/graphql-go/internal/exec +github.com/graph-gophers/graphql-go/internal/exec/packer +github.com/graph-gophers/graphql-go/internal/exec/resolvable +github.com/graph-gophers/graphql-go/internal/exec/selected +github.com/graph-gophers/graphql-go/internal/query +github.com/graph-gophers/graphql-go/internal/schema +github.com/graph-gophers/graphql-go/internal/validation +github.com/graph-gophers/graphql-go/introspection +github.com/graph-gophers/graphql-go/log +github.com/graph-gophers/graphql-go/trace +github.com/graph-gophers/graphql-go/types # github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 ## explicit; go 1.12 github.com/heroku/docker-registry-client/registry @@ -163,8 +177,12 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kr/pretty v0.3.0 -## explicit; go 1.12 +# github.com/klauspost/compress v1.15.1 +## explicit; go 1.15 +github.com/klauspost/compress/s2 +# github.com/klauspost/cpuid/v2 v2.0.12 +## explicit; go 1.15 +github.com/klauspost/cpuid/v2 # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -179,17 +197,18 @@ github.com/mattermost/go-i18n/i18n/translation # github.com/mattermost/ldap v3.0.4+incompatible ## explicit github.com/mattermost/ldap -# github.com/mattermost/logr v1.0.5 +# github.com/mattermost/logr/v2 v2.0.15 ## explicit; go 1.12 -github.com/mattermost/logr -github.com/mattermost/logr/format -github.com/mattermost/logr/target -# github.com/mattermost/mattermost-cloud v0.69.1-0.20230117143751-957e6fd0e408 +github.com/mattermost/logr/v2 +github.com/mattermost/logr/v2/config +github.com/mattermost/logr/v2/formatters +github.com/mattermost/logr/v2/targets +# github.com/mattermost/mattermost-cloud v0.71.0 ## explicit; go 1.19 
github.com/mattermost/mattermost-cloud/k8s github.com/mattermost/mattermost-cloud/model -# github.com/mattermost/mattermost-operator v1.19.0-rc.2 -## explicit; go 1.17 +# github.com/mattermost/mattermost-operator v1.20.1 +## explicit; go 1.19 github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1 github.com/mattermost/mattermost-operator/apis/mattermost/v1beta1 github.com/mattermost/mattermost-operator/pkg/client/clientset/versioned @@ -199,14 +218,15 @@ github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/scheme github.com/mattermost/mattermost-operator/pkg/client/v1beta1/clientset/versioned/typed/mattermost/v1beta1 github.com/mattermost/mattermost-operator/pkg/utils -# github.com/mattermost/mattermost-server/v5 v5.26.0 -## explicit; go 1.14 -github.com/mattermost/mattermost-server/v5/mlog -github.com/mattermost/mattermost-server/v5/model -github.com/mattermost/mattermost-server/v5/services/timezones -github.com/mattermost/mattermost-server/v5/utils/fileutils -github.com/mattermost/mattermost-server/v5/utils/jsonutils -github.com/mattermost/mattermost-server/v5/utils/markdown +# github.com/mattermost/mattermost-server/v6 v6.7.2 +## explicit; go 1.16 +github.com/mattermost/mattermost-server/v6/model +github.com/mattermost/mattermost-server/v6/services/timezones +github.com/mattermost/mattermost-server/v6/shared/filestore +github.com/mattermost/mattermost-server/v6/shared/i18n +github.com/mattermost/mattermost-server/v6/shared/markdown +github.com/mattermost/mattermost-server/v6/shared/mlog +github.com/mattermost/mattermost-server/v6/utils/jsonutils # github.com/mattermost/rotator v0.2.0 ## explicit; go 1.15 github.com/mattermost/rotator/aws @@ -216,6 +236,28 @@ github.com/mattermost/rotator/rotator # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/minio/md5-simd v1.1.2 +## explicit; go 1.14 +github.com/minio/md5-simd +# github.com/minio/minio-go/v7 v7.0.24 +## explicit; go 1.17 +github.com/minio/minio-go/v7 +github.com/minio/minio-go/v7/pkg/credentials +github.com/minio/minio-go/v7/pkg/encrypt +github.com/minio/minio-go/v7/pkg/lifecycle +github.com/minio/minio-go/v7/pkg/notification +github.com/minio/minio-go/v7/pkg/replication +github.com/minio/minio-go/v7/pkg/s3utils +github.com/minio/minio-go/v7/pkg/set +github.com/minio/minio-go/v7/pkg/signer +github.com/minio/minio-go/v7/pkg/sse +github.com/minio/minio-go/v7/pkg/tags +# github.com/minio/sha256-simd v1.0.0 +## explicit; go 1.13 +github.com/minio/sha256-simd +# github.com/mitchellh/go-homedir v1.1.0 +## explicit +github.com/mitchellh/go-homedir # github.com/moby/spdystream v0.2.0 ## explicit; go 1.13 github.com/moby/spdystream @@ -229,8 +271,6 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/gomega v1.18.1 -## explicit; go 1.16 # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -238,12 +278,20 @@ github.com/opencontainers/go-digest ## explicit github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 +# github.com/opentracing/opentracing-go v1.2.0 +## explicit; go 1.14 +github.com/opentracing/opentracing-go +github.com/opentracing/opentracing-go/ext +github.com/opentracing/opentracing-go/log # 
github.com/pborman/uuid v1.2.1 ## explicit github.com/pborman/uuid # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml +# github.com/philhofer/fwd v1.1.1 +## explicit +github.com/philhofer/fwd # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -279,8 +327,9 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/rogpeppe/go-internal v1.8.0 -## explicit; go 1.11 +# github.com/rs/xid v1.4.0 +## explicit; go 1.12 +github.com/rs/xid # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -294,38 +343,33 @@ github.com/slok/sloth/pkg/kubernetes/gen/clientset/versioned/typed/sloth/v1 # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.8.1 +# github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/wiggin77/cfg v1.0.2 -## explicit; go 1.12 -github.com/wiggin77/cfg -github.com/wiggin77/cfg/ini -github.com/wiggin77/cfg/timeconv +# github.com/tinylib/msgp v1.1.6 +## explicit; go 1.14 +github.com/tinylib/msgp/msgp +# github.com/vmihailenco/msgpack/v5 v5.3.5 +## explicit; go 1.11 +github.com/vmihailenco/msgpack/v5 +github.com/vmihailenco/msgpack/v5/msgpcode +# github.com/vmihailenco/tagparser/v2 v2.0.0 +## explicit; go 1.15 +github.com/vmihailenco/tagparser/v2 +github.com/vmihailenco/tagparser/v2/internal +github.com/vmihailenco/tagparser/v2/internal/parser # github.com/wiggin77/merror v1.0.3 ## explicit; go 1.15 github.com/wiggin77/merror # github.com/wiggin77/srslog v1.0.1 ## explicit; go 1.14 github.com/wiggin77/srslog -# go.uber.org/atomic v1.9.0 -## explicit; go 1.13 -go.uber.org/atomic -# go.uber.org/multierr v1.8.0 -## explicit; go 1.14 -go.uber.org/multierr -# go.uber.org/zap v1.19.1 -## explicit; go 1.13 -go.uber.org/zap -go.uber.org/zap/buffer -go.uber.org/zap/internal/bufferpool -go.uber.org/zap/internal/color -go.uber.org/zap/internal/exit -go.uber.org/zap/zapcore # golang.org/x/crypto v0.1.0 ## explicit; go 1.17 +golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt +golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/openpgp @@ -334,6 +378,8 @@ golang.org/x/crypto/openpgp/elgamal golang.org/x/crypto/openpgp/errors golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k +golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/scrypt # golang.org/x/net v0.1.0 ## explicit; go 1.17 golang.org/x/net/context @@ -346,12 +392,14 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/socks golang.org/x/net/proxy +golang.org/x/net/publicsuffix # golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sys v0.1.0 ## explicit; go 1.17 +golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix @@ -417,11 +465,12 @@ google.golang.org/protobuf/types/known/timestamppb # gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d ## explicit gopkg.in/asn1-ber.v1 -# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c -## explicit; go 1.11 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 +# gopkg.in/ini.v1 v1.67.0 +## explicit +gopkg.in/ini.v1 # gopkg.in/natefinch/lumberjack.v2 v2.0.0 ## explicit gopkg.in/natefinch/lumberjack.v2
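Closing out this dependency bump: ReflectFrom in the newly vendored struct.go above is the write-side inverse of MapTo. A minimal round-trip sketch; the server struct mirrors the hypothetical one in the earlier MapTo sketch, and Empty/WriteTo are assumed standard ini.v1 API:

```go
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/ini.v1"
)

// Same hypothetical struct shape as the MapTo sketch earlier.
type server struct {
	Host  string `ini:"host"`
	Ports []int  `ini:"ports" delim:"|"`
	Debug bool   `ini:"debug"`
}

func main() {
	cfg := ini.Empty()
	s := server{Host: "example.com", Ports: []int{8080, 8443}, Debug: true}

	// ReflectFrom walks the struct via reflectWithProperType; the slice field
	// is joined with its delim tag by reflectSliceWithProperType.
	if err := cfg.Section("").ReflectFrom(&s); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if _, err := cfg.WriteTo(&buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // host, ports, and debug keys in the default section
}
```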